diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go
index f51873075da3..bbbca3cdd60b 100644
--- a/pkg/ccl/backupccl/backup_test.go
+++ b/pkg/ccl/backupccl/backup_test.go
@@ -104,10 +104,6 @@ func backupRestoreTestSetupWithParams(
 	if _, err := workloadsql.Setup(ctx, sqlDB.DB.(*gosql.DB), bankData, l); err != nil {
 		t.Fatalf("%+v", err)
 	}
-	if err := workloadsql.Split(ctx, sqlDB.DB.(*gosql.DB), bankData.Tables()[0], 1 /* concurrency */); err != nil {
-		// This occasionally flakes, so ignore errors.
-		t.Logf("failed to split: %+v", err)
-	}
 
 	if err := tc.WaitForFullReplication(); err != nil {
 		t.Fatal(err)
diff --git a/pkg/ccl/importccl/exportcsv_test.go b/pkg/ccl/importccl/exportcsv_test.go
index 25c63e3f639b..3ca5f90e3ea2 100644
--- a/pkg/ccl/importccl/exportcsv_test.go
+++ b/pkg/ccl/importccl/exportcsv_test.go
@@ -55,9 +55,6 @@ func setupExportableBank(t *testing.T, nodes, rows int) (*sqlutils.SQLRunner, st
 	zoneConfig := config.DefaultZoneConfig()
 	zoneConfig.RangeMaxBytes = proto.Int64(5000)
 	config.TestingSetZoneConfig(last+1, zoneConfig)
-	if err := workloadsql.Split(ctx, conn, wk.Tables()[0], 1 /* concurrency */); err != nil {
-		t.Fatal(err)
-	}
 
 	db.Exec(t, "ALTER TABLE bank SCATTER")
 	db.Exec(t, "SELECT 'force a scan to repopulate range cache' FROM [SELECT count(*) FROM bank]")
diff --git a/pkg/ccl/workloadccl/cliccl/fixtures.go b/pkg/ccl/workloadccl/cliccl/fixtures.go
index c769ac3f8711..bba7580985d3 100644
--- a/pkg/ccl/workloadccl/cliccl/fixtures.go
+++ b/pkg/ccl/workloadccl/cliccl/fixtures.go
@@ -318,13 +318,6 @@ func fixturesLoad(gen workload.Generator, urls []string, dbName string) error {
 		return err
 	}
 
-	const splitConcurrency = 384 // TODO(dan): Don't hardcode this.
-	for _, table := range gen.Tables() {
-		if err := workloadsql.Split(ctx, sqlDB, table, splitConcurrency); err != nil {
-			return errors.Wrapf(err, `splitting %s`, table.Name)
-		}
-	}
-
 	if hooks, ok := gen.(workload.Hookser); *fixturesRunChecks && ok {
 		if consistencyCheckFn := hooks.Hooks().CheckConsistency; consistencyCheckFn != nil {
 			log.Info(ctx, "fixture is imported; now running consistency checks (ctrl-c to abort)")
@@ -357,13 +350,6 @@ func fixturesImport(gen workload.Generator, urls []string, dbName string) error {
 		return err
 	}
 
-	const splitConcurrency = 384 // TODO(dan): Don't hardcode this.
-	for _, table := range gen.Tables() {
-		if err := workloadsql.Split(ctx, sqlDB, table, splitConcurrency); err != nil {
-			return errors.Wrapf(err, `splitting %s`, table.Name)
-		}
-	}
-
 	if hooks, ok := gen.(workload.Hookser); *fixturesRunChecks && ok {
 		if consistencyCheckFn := hooks.Hooks().CheckConsistency; consistencyCheckFn != nil {
 			log.Info(ctx, "fixture is restored; now running consistency checks (ctrl-c to abort)")
diff --git a/pkg/cmd/roachtest/hotspotsplits.go b/pkg/cmd/roachtest/hotspotsplits.go
index dac267ce6be7..b4bd3da84d52 100644
--- a/pkg/cmd/roachtest/hotspotsplits.go
+++ b/pkg/cmd/roachtest/hotspotsplits.go
@@ -50,7 +50,7 @@ func registerHotSpotSplits(r *testRegistry) {
 			const blockSize = 1 << 19 // 512 KB
 
 			return c.RunL(ctx, quietL, appNode, fmt.Sprintf(
-				"./workload run kv --read-percent=0 --splits=0 --tolerate-errors --concurrency=%d "+
+				"./workload run kv --read-percent=0 --tolerate-errors --concurrency=%d "+
 					"--min-block-bytes=%d --max-block-bytes=%d --duration=%s {pgurl:1-3}",
 				concurrency, blockSize, blockSize, duration.String()))
 		})
diff --git a/pkg/cmd/roachtest/kv.go b/pkg/cmd/roachtest/kv.go
index 5fac44482531..ddceac802d82 100644
--- a/pkg/cmd/roachtest/kv.go
+++ b/pkg/cmd/roachtest/kv.go
@@ -550,17 +550,16 @@ func registerKVRangeLookups(r *testRegistry) {
 		m := newMonitor(ctx, c, c.Range(1, nodes))
 		m.Go(func(ctx context.Context) error {
 			defer close(doneWorkload)
-			cmd := fmt.Sprintf("./workload init kv {pgurl:1-%d}", nodes)
+			cmd := fmt.Sprintf("./workload init kv {pgurl:1-%d} --splits=1000", nodes)
 			c.Run(ctx, c.Node(nodes+1), cmd)
 			close(doneInit)
 			concurrency := ifLocal("", " --concurrency="+fmt.Sprint(nodes*64))
-			splits := " --splits=1000"
 			duration := " --duration=" + ifLocal("10s", "10m")
 			readPercent := " --read-percent=50"
 			// We run kv with --tolerate-errors, since the relocate workload is
 			// expected to create `result is ambiguous (removing replica)` errors.
 			cmd = fmt.Sprintf("./workload run kv --tolerate-errors"+
-				concurrency+splits+duration+readPercent+
+				concurrency+duration+readPercent+
 				" {pgurl:1-%d}", nodes)
 			start := timeutil.Now()
 			c.Run(ctx, c.Node(nodes+1), cmd)
diff --git a/pkg/cmd/roachtest/rebalance_load.go b/pkg/cmd/roachtest/rebalance_load.go
index b53b62590e42..17a5b00ccc75 100644
--- a/pkg/cmd/roachtest/rebalance_load.go
+++ b/pkg/cmd/roachtest/rebalance_load.go
@@ -48,6 +48,7 @@ func registerRebalanceLoad(r *testRegistry) {
 	) {
 		roachNodes := c.Range(1, c.spec.NodeCount-1)
 		appNode := c.Node(c.spec.NodeCount)
+		splits := len(roachNodes) - 1 // n-1 splits => n ranges => 1 lease per node
 
 		c.Put(ctx, cockroach, "./cockroach", roachNodes)
 		args := startArgs(
@@ -55,7 +56,7 @@
 		c.Start(ctx, t, roachNodes, args)
 
 		c.Put(ctx, workload, "./workload", appNode)
-		c.Run(ctx, appNode, `./workload init kv --drop {pgurl:1}`)
+		c.Run(ctx, appNode, fmt.Sprintf("./workload init kv --drop --splits=%d {pgurl:1}", splits))
 
 		var m *errgroup.Group // see comment in version.go
 		m, ctx = errgroup.WithContext(ctx)
@@ -74,11 +75,10 @@
 			}
 			defer quietL.close()
 
-			splits := len(roachNodes) - 1 // n-1 splits => n ranges => 1 lease per node
 			err = c.RunL(ctx, quietL, appNode, fmt.Sprintf(
-				"./workload run kv --read-percent=95 --splits=%d --tolerate-errors --concurrency=%d "+
+				"./workload run kv --read-percent=95 --tolerate-errors --concurrency=%d "+
 					"--duration=%v {pgurl:1-%d}",
-				splits, concurrency, maxDuration, len(roachNodes)))
+				concurrency, maxDuration, len(roachNodes)))
 			if ctx.Err() == context.Canceled {
 				// We got canceled either because lease balance was achieved or the
 				// other worker hit an error. In either case, it's not this worker's
diff --git a/pkg/workload/cli/run.go b/pkg/workload/cli/run.go
index 372892fde3ad..04a46ecddaa3 100644
--- a/pkg/workload/cli/run.go
+++ b/pkg/workload/cli/run.go
@@ -348,13 +348,6 @@ func runRun(gen workload.Generator, urls []string, dbName string) error {
 		log.Infof(ctx, "retrying after error while creating load: %v", err)
 	}
 
-	const splitConcurrency = 384 // TODO(dan): Don't hardcode this.
-	for _, table := range gen.Tables() {
-		if err := workloadsql.Split(ctx, initDB, table, splitConcurrency); err != nil {
-			return err
-		}
-	}
-
 	start := timeutil.Now()
 	errCh := make(chan error)
 	var rampDone chan struct{}
diff --git a/pkg/workload/workloadsql/workloadsql.go b/pkg/workload/workloadsql/workloadsql.go
index 40e3b0418aad..8b033411ebfe 100644
--- a/pkg/workload/workloadsql/workloadsql.go
+++ b/pkg/workload/workloadsql/workloadsql.go
@@ -40,6 +40,13 @@ func Setup(
 		return 0, err
 	}
 
+	const splitConcurrency = 384 // TODO(dan): Don't hardcode this.
+	for _, table := range gen.Tables() {
+		if err := Split(ctx, db, table, splitConcurrency); err != nil {
+			return 0, err
+		}
+	}
+
 	var hooks workload.Hooks
 	if h, ok := gen.(workload.Hookser); ok {
 		hooks = h.Hooks()
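
The net effect of this patch is that workloadsql.Setup becomes the single owner of the pre-split step: every caller that previously paired Setup (or ./workload init) with its own workloadsql.Split loop drops that loop, and the roachtests keep their split counts by passing --splits to workload init, the path that reaches Setup, rather than to workload run. A minimal sketch of the resulting caller pattern follows, assuming the Setup signature visible in the backup_test.go hunk; the initWorkload wrapper and its arguments are illustrative, not part of this patch:

    package example

    import (
    	"context"
    	gosql "database/sql"

    	"github.com/cockroachdb/cockroach/pkg/workload"
    	"github.com/cockroachdb/cockroach/pkg/workload/workloadsql"
    )

    // initWorkload is a hypothetical caller showing the post-patch shape:
    // Setup creates and populates the generator's tables and now also
    // pre-splits each of them (at the hardcoded splitConcurrency of 384),
    // so no separate workloadsql.Split loop follows the call.
    func initWorkload(
    	ctx context.Context, db *gosql.DB, gen workload.Generator, l workload.InitialDataLoader,
    ) error {
    	if _, err := workloadsql.Setup(ctx, db, gen, l); err != nil {
    		return err
    	}
    	return nil
    }

One behavioral change worth noting: the backup test previously tolerated a flaky split (logging via t.Logf and continuing), whereas Setup now returns the first split error to every caller.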