From 2ec3b13cdeb722a082c462bb323cf8b725addb73 Mon Sep 17 00:00:00 2001 From: Rohan Yadav Date: Tue, 27 Aug 2019 16:09:47 -0700 Subject: [PATCH] demo: Add a flag to automatically apply geo-partitioned replicas. Work for #39945. This PR adds a Partition function to the workload.Hooks struct so that workloads that have a partitioning step can just implement that function within the workload package. Release note (cli change): Add an option for cockroach demo to automatically apply the geo-partitioned replicas topology to the movr dataset using the --geo-partitioned-replicas flag. --- pkg/ccl/cliccl/demo.go | 1 + pkg/ccl/workloadccl/allccl/all_test.go | 2 +- pkg/cli/cliflags/flags.go | 12 ++ pkg/cli/context.go | 10 +- pkg/cli/demo.go | 96 ++++++++++++++-- pkg/cli/flags.go | 1 + .../test_demo_partitioning.tcl | 87 +++++++++++++++ pkg/workload/movr/movr.go | 105 +++++++++++++++++- pkg/workload/workload.go | 3 + 9 files changed, 301 insertions(+), 16 deletions(-) create mode 100644 pkg/cli/interactive_tests/test_demo_partitioning.tcl diff --git a/pkg/ccl/cliccl/demo.go b/pkg/ccl/cliccl/demo.go index af2ccb084e99..1199f50ecbcf 100644 --- a/pkg/ccl/cliccl/demo.go +++ b/pkg/ccl/cliccl/demo.go @@ -46,6 +46,7 @@ func getLicense(clusterID uuid.UUID) (string, error) { return "", err } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { return "", errors.New("unable to connect to licensing endpoint") } diff --git a/pkg/ccl/workloadccl/allccl/all_test.go b/pkg/ccl/workloadccl/allccl/all_test.go index ba3b6c68a506..6aeb0472296f 100644 --- a/pkg/ccl/workloadccl/allccl/all_test.go +++ b/pkg/ccl/workloadccl/allccl/all_test.go @@ -251,7 +251,7 @@ func TestDeterministicInitialData(t *testing.T) { `intro`: 0x81c6a8cfd9c3452a, `json`: 0xcbf29ce484222325, `ledger`: 0xebe27d872d980271, - `movr`: 0x4f19a54c7e779f9c, + `movr`: 0x6a094e9d15a07970, `queue`: 0xcbf29ce484222325, `rand`: 0xcbf29ce484222325, `roachmart`: 0xda5e73423dbdb2d9, diff --git a/pkg/cli/cliflags/flags.go 
b/pkg/cli/cliflags/flags.go index 507a03783a9d..33cce90296c2 100644 --- a/pkg/cli/cliflags/flags.go +++ b/pkg/cli/cliflags/flags.go @@ -918,6 +918,18 @@ to us-east1 and availability zone to 3. `, } + DemoGeoPartitionedReplicas = FlagInfo{ + Name: "geo-partitioned-replicas", + Description: ` +When used with the Movr dataset, create a 9 node cluster and automatically apply +the geo-partitioned replicas topology across 3 virtual regions named us-east1, us-west1, and +europe-west1. This command will fail with an error if an enterprise license could not +be acquired, or if the Movr dataset is not used. More information about the geo-partitioned +replicas topology can be found at this URL: +https://www.cockroachlabs.com/docs/v19.1/topology-geo-partitioned-replicas.html + `, + } + UseEmptyDatabase = FlagInfo{ Name: "empty", Description: ` diff --git a/pkg/cli/context.go b/pkg/cli/context.go index fbe8ce82b105..c7ec94d74edf 100644 --- a/pkg/cli/context.go +++ b/pkg/cli/context.go @@ -147,6 +147,7 @@ func initCLIDefaults() { demoCtx.useEmptyDatabase = false demoCtx.runWorkload = false demoCtx.localities = nil + demoCtx.geoPartitionedReplicas = false initPreFlagsDefaults() @@ -336,8 +337,9 @@ var sqlfmtCtx struct { // demoCtx captures the command-line parameters of the `demo` command. // Defaults set by InitCLIDefaults() above. var demoCtx struct { - nodes int - useEmptyDatabase bool - runWorkload bool - localities demoLocalityList + nodes int + useEmptyDatabase bool + runWorkload bool + localities demoLocalityList + geoPartitionedReplicas bool } diff --git a/pkg/cli/demo.go b/pkg/cli/demo.go index 56c001b044f1..91608ee12754 100644 --- a/pkg/cli/demo.go +++ b/pkg/cli/demo.go @@ -49,10 +49,10 @@ subcommands: e.g. "cockroach demo startrek". See --help for a full list. By default, the 'movr' dataset is pre-loaded. You can also use --empty to avoid pre-loading a dataset. 
-cockroach demo attempts to connect to a Cockroach Labs server to obtain a -temporary enterprise license for demoing enterprise features and enable -telemetry back to Cockroach Labs. In order to disable this behavior, set the -environment variable "COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING". +cockroach demo attempts to connect to a Cockroach Labs server to obtain a +temporary enterprise license for demoing enterprise features and enable +telemetry back to Cockroach Labs. In order to disable this behavior, set the +environment variable "COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING" to true. `, Example: ` cockroach demo`, Args: cobra.NoArgs, @@ -207,6 +207,10 @@ func setupTransientServers( } urlStr := url.String() + // Communicate information about license acquisition to services + // that depend on it. + licenseSuccess := make(chan bool, 1) + // Start up the update check loop. // We don't do this in (*server.Server).Start() because we don't want it // in tests. @@ -230,8 +234,13 @@ func setupTransientServers( const msg = "Unable to acquire demo license. Enterprise features are not enabled in this session.\n" fmt.Fprint(stderr, msg) } + licenseSuccess <- success }() } + } else { + // If we aren't supposed to check for a license, then automatically + // notify failure. + licenseSuccess <- false } // If there is a load generator, create its database and load its @@ -253,10 +262,40 @@ func setupTransientServers( return ``, ``, cleanup, err } + partitioningComplete := make(chan struct{}, 1) + // If we are requested to prepartition our data spawn a goroutine to do the partitioning. + if demoCtx.geoPartitionedReplicas { + go func() { + success := <-licenseSuccess + // Only try partitioning if license acquisition was successful. + if success { + db, err := gosql.Open("postgres", urlStr) + if err != nil { + exitWithError("demo", err) + } + defer db.Close() + // Based on validation done in setup, we know that this workload has a partitioning step. 
+ if err := gen.(workload.Hookser).Hooks().Partition(db); err != nil { + exitWithError("demo", err) + } + partitioningComplete <- struct{}{} + } else { + const msg = "license acquisition was unsuccessful. Enterprise features are needed to partition data" + exitWithError("demo", errors.New(msg)) + } + }() + } + if demoCtx.runWorkload { - if err := runWorkload(ctx, gen, urlStr, stopper); err != nil { - return ``, ``, cleanup, err - } + go func() { + // If partitioning was requested, wait for that to complete before running the workload. + if demoCtx.geoPartitionedReplicas { + <-partitioningComplete + } + if err := runWorkload(ctx, gen, urlStr, stopper); err != nil { + exitWithError("demo", err) + } + }() } } @@ -319,6 +358,49 @@ func runDemo(cmd *cobra.Command, gen workload.Generator) error { return errors.New("cannot run a workload against an empty database") } + // Make sure that the user didn't request to have a topology and an empty database. + if demoCtx.geoPartitionedReplicas && demoCtx.useEmptyDatabase { + return errors.New("cannot setup geo-partitioned replicas topology on an empty database") + } + + // Make sure that the Movr database is selected when automatically partitioning. + if demoCtx.geoPartitionedReplicas && (gen == nil || gen.Meta().Name != "movr") { + return errors.New("--geo-partitioned-replicas must be used with the Movr dataset") + } + + // If the geo-partitioned replicas flag was given and the demo localities have changed, throw an error. + if demoCtx.geoPartitionedReplicas && demoCtx.localities != nil { + return errors.New("--demo-locality cannot be used with --geo-partitioned-replicas") + } + + // If the geo-partitioned replicas flag was given and the nodes have changed, throw an error. 
+ if demoCtx.geoPartitionedReplicas && cmd.Flags().Lookup(cliflags.DemoNodes.Name).Changed { + return errors.New("--nodes cannot be used with --geo-partitioned-replicas") + } + + // If geo-partitioned-replicas is requested, make sure the workload has a Partitioning step. + if demoCtx.geoPartitionedReplicas { + configErr := errors.New(fmt.Sprintf("workload %s is not configured to have a partitioning step", gen.Meta().Name)) + hookser, ok := gen.(workload.Hookser) + if !ok { + return configErr + } + if hookser.Hooks().Partition == nil { + return configErr + } + } + + // The geo-partitioned replicas demo only works on a 9 node cluster, so set the node count as such. + // Ignore input user localities so that the nodes have the same attributes/localities as expected. + if demoCtx.geoPartitionedReplicas { + const msg = `# +# --geo-partitioned replicas operates on a 9 node cluster. +# The cluster size has been changed from the default to 9 nodes.` + fmt.Println(msg) + demoCtx.nodes = 9 + demoCtx.localities = nil + } + connURL, adminURL, cleanup, err := setupTransientServers(cmd, gen) defer cleanup() if err != nil { diff --git a/pkg/cli/flags.go b/pkg/cli/flags.go index 112e7ac26c76..39d15a606ff9 100644 --- a/pkg/cli/flags.go +++ b/pkg/cli/flags.go @@ -586,6 +586,7 @@ func init() { IntFlag(demoFlags, &demoCtx.nodes, cliflags.DemoNodes, 1) BoolFlag(demoFlags, &demoCtx.runWorkload, cliflags.RunDemoWorkload, false) VarFlag(demoFlags, &demoCtx.localities, cliflags.DemoNodeLocality) + BoolFlag(demoFlags, &demoCtx.geoPartitionedReplicas, cliflags.DemoGeoPartitionedReplicas, false) // The --empty flag is only valid for the top level demo command, // so we use the regular flag set. 
BoolFlag(demoCmd.Flags(), &demoCtx.useEmptyDatabase, cliflags.UseEmptyDatabase, false) diff --git a/pkg/cli/interactive_tests/test_demo_partitioning.tcl b/pkg/cli/interactive_tests/test_demo_partitioning.tcl new file mode 100644 index 000000000000..bb7bf140d416 --- /dev/null +++ b/pkg/cli/interactive_tests/test_demo_partitioning.tcl @@ -0,0 +1,87 @@ +#! /usr/bin/env expect -f + +source [file join [file dirname $argv0] common.tcl] + +start_test "Expect partitioning succeeds" +# test that partitioning works if a license could be acquired +spawn $argv demo --geo-partitioned-replicas + +# wait for the shell to start up +eexpect "movr>" + +# send multiple "SHOW PARTITIONS" requests to the DB as partitioning happens asynchronously. +for {set i 0} {$i < 10} {incr i} { + send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\];\r" + sleep 1 +} + +# The number of partitions across the MovR database we expect is 24. +eexpect "24" +eexpect "(1 row)" +eexpect "movr>" + +send "SHOW PARTITIONS FROM TABLE vehicles;\r" + +# Verify the partitions are as we expect +send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_west';\r" +eexpect "8" +eexpect "(1 row)" +eexpect "movr>" + +send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_west' AND partition_value='(''seattle''), (''san francisco''), (''los angeles'')';\r" +eexpect "8" +eexpect "(1 row)" +eexpect "movr>" + +send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_west' AND zone_config='constraints = ''\[+region=us-west1\]''';\r" +eexpect "8" +eexpect "(1 row)" +eexpect "movr>" + +send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_east';\r" +eexpect "8" +eexpect "(1 row)" +eexpect "movr>" + +send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_east' AND partition_value='(''new york''), (''boston''), (''washington dc'')';\r" 
+eexpect "8" +eexpect "(1 row)" +eexpect "movr>" + +send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='us_east' AND zone_config='constraints = ''\[+region=us-east1\]''';\r" +eexpect "8" +eexpect "(1 row)" +eexpect "movr>" + +send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='europe_west';\r" +eexpect "8" +eexpect "(1 row)" +eexpect "movr>" + +send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='europe_west' AND partition_value='(''amsterdam''), (''paris''), (''rome'')';\r" +eexpect "8" +eexpect "(1 row)" +eexpect "movr>" + +send "SELECT count(*) FROM \[SHOW PARTITIONS FROM DATABASE movr\] WHERE partition_name='europe_west' AND zone_config='constraints = ''\[+region=europe-west1\]''';\r" +eexpect "8" +eexpect "(1 row)" +eexpect "movr>" + +interrupt +eexpect eof +end_test + + +start_test "Expect an error if geo-partitioning is requested and a license cannot be acquired" + +# set the proper environment variable +set env(COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING) "true" +spawn $argv demo --geo-partitioned-replicas +# expect a failure +eexpect "Error: license acquisition was unsuccessful. 
Enterprise features are needed to partition data" # clean up after the test interrupt eexpect eof end_test  diff --git a/pkg/workload/movr/movr.go b/pkg/workload/movr/movr.go index 3615604fc404..61a8e126981e 100644 --- a/pkg/workload/movr/movr.go +++ b/pkg/workload/movr/movr.go @@ -112,9 +112,6 @@ var cities = []struct { {city: "seattle", locality: "us_west"}, {city: "san francisco", locality: "us_west"}, {city: "los angeles", locality: "us_west"}, - {city: "chicago", locality: "us_central"}, - {city: "detroit", locality: "us_central"}, - {city: "minneapolis", locality: "us_central"}, {city: "amsterdam", locality: "eu_west"}, {city: "paris", locality: "eu_west"}, {city: "rome", locality: "eu_west"}, @@ -210,8 +207,108 @@ func (g *movr) Hooks() workload.Hooks { } } } + return nil + }, + // This partitioning step is intended for a 3 region cluster, which has the localities region=us-east1, + // region=us-west1, region=europe-west1. + Partition: func(db *gosql.DB) error { + // Create us-west, us-east and europe-west partitions. 
+ q := ` + ALTER TABLE users PARTITION BY LIST (city) ( + PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'), + PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'), + PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome') + ); + ALTER TABLE vehicles PARTITION BY LIST (city) ( + PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'), + PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'), + PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome') + ); + ALTER INDEX vehicles_auto_index_fk_city_ref_users PARTITION BY LIST (city) ( + PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'), + PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'), + PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome') + ); + ALTER TABLE rides PARTITION BY LIST (city) ( + PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'), + PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'), + PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome') + ); + ALTER INDEX rides_auto_index_fk_city_ref_users PARTITION BY LIST (city) ( + PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'), + PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'), + PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome') + ); + ALTER INDEX rides_auto_index_fk_vehicle_city_ref_vehicles PARTITION BY LIST (vehicle_city) ( + PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'), + PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'), + PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome') + ); + ALTER TABLE user_promo_codes PARTITION BY LIST (city) ( + PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'), + PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'), + PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome') + ); + ALTER TABLE 
vehicle_location_histories PARTITION BY LIST (city) ( + PARTITION us_west VALUES IN ('seattle', 'san francisco', 'los angeles'), + PARTITION us_east VALUES IN ('new york', 'boston', 'washington dc'), + PARTITION europe_west VALUES IN ('amsterdam', 'paris', 'rome') + ); + ` + if _, err := db.Exec(q); err != nil { + return err + } + + // Alter the partitions to place replicas in the appropriate zones. + q = ` + ALTER PARTITION us_west OF INDEX users@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-west1"]'; + ALTER PARTITION us_east OF INDEX users@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-east1"]'; + ALTER PARTITION europe_west OF INDEX users@* CONFIGURE ZONE USING CONSTRAINTS='["+region=europe-west1"]'; + + ALTER PARTITION us_west OF INDEX vehicles@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-west1"]'; + ALTER PARTITION us_east OF INDEX vehicles@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-east1"]'; + ALTER PARTITION europe_west OF INDEX vehicles@* CONFIGURE ZONE USING CONSTRAINTS='["+region=europe-west1"]'; + + ALTER PARTITION us_west OF INDEX rides@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-west1"]'; + ALTER PARTITION us_east OF INDEX rides@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-east1"]'; + ALTER PARTITION europe_west OF INDEX rides@* CONFIGURE ZONE USING CONSTRAINTS='["+region=europe-west1"]'; + + ALTER PARTITION us_west OF INDEX user_promo_codes@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-west1"]'; + ALTER PARTITION us_east OF INDEX user_promo_codes@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-east1"]'; + ALTER PARTITION europe_west OF INDEX user_promo_codes@* CONFIGURE ZONE USING CONSTRAINTS='["+region=europe-west1"]'; + + ALTER PARTITION us_west OF INDEX vehicle_location_histories@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-west1"]'; + ALTER PARTITION us_east OF INDEX vehicle_location_histories@* CONFIGURE ZONE USING CONSTRAINTS='["+region=us-east1"]'; + ALTER PARTITION europe_west OF INDEX 
vehicle_location_histories@* CONFIGURE ZONE USING CONSTRAINTS='["+region=europe-west1"]'; + ` + if _, err := db.Exec(q); err != nil { + return err + } + + // Create some duplicate indexes for the promo_codes table. + q = ` + CREATE INDEX promo_codes_idx_us_west ON promo_codes (code) STORING (description, creation_time, expiration_time, rules); + CREATE INDEX promo_codes_idx_europe_west ON promo_codes (code) STORING (description, creation_time, expiration_time, rules); + ` + if _, err := db.Exec(q); err != nil { + return err + } - // TODO(dan): Partitions. + // Apply configurations to the index for fast reads. + q = ` + ALTER TABLE promo_codes CONFIGURE ZONE USING num_replicas = 3, + constraints = '{"+region=us-east1": 1}', + lease_preferences = '[[+region=us-east1]]'; + ALTER INDEX promo_codes@promo_codes_idx_us_west CONFIGURE ZONE USING + constraints = '{"+region=us-west1": 1}', + lease_preferences = '[[+region=us-west1]]'; + ALTER INDEX promo_codes@promo_codes_idx_europe_west CONFIGURE ZONE USING + constraints = '{"+region=europe-west1": 1}', + lease_preferences = '[[+region=europe-west1]]'; + ` + if _, err := db.Exec(q); err != nil { + return err + } return nil }, } diff --git a/pkg/workload/workload.go b/pkg/workload/workload.go index 79bdd542c59a..f6579cbb6176 100644 --- a/pkg/workload/workload.go +++ b/pkg/workload/workload.go @@ -107,6 +107,9 @@ type Hooks struct { // These are expected to pass after the initial data load as well as after // running queryload. CheckConsistency func(context.Context, *gosql.DB) error + // Partition is used to run a partitioning step on the data created by the workload. + // TODO (rohany): migrate existing partitioning steps (such as tpcc's) into here. + Partition func(*gosql.DB) error } // Meta is used to register a Generator at init time and holds meta information