diff --git a/pkg/ccl/partitionccl/partition_test.go b/pkg/ccl/partitionccl/partition_test.go
index 3931d86c8d45..e88a39059233 100644
--- a/pkg/ccl/partitionccl/partition_test.go
+++ b/pkg/ccl/partitionccl/partition_test.go
@@ -12,8 +12,10 @@ import (
 	"bytes"
 	"context"
 	gosql "database/sql"
+	"flag"
 	"fmt"
 	"math/rand"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -56,9 +58,13 @@ type partitioningTest struct {
 	// table name should be.
 	schema string
 
-	// configs are each a shorthand for a zone config, formatted as
-	// `@index_name` or `.partition_name`. Optionally a suffix of a colon and a
-	// comma-separated list of constraints may be included (`@index_name:+dc1`).
+	// configs are each a shorthand for a zone config that sets leaseholder
+	// preferences. Constraints will be added such that data is constrained to
+	// the dc of the leaseholder.
+	//
+	// The values are formatted as `@index_name` or `.partition_name`.
+	// Optionally a suffix of a colon and a comma-separated list of constraints
+	// may be included (`@index_name:+dc1`).
 	// These will be parsed into `parsed.subzones`.
 	configs []string
 
@@ -102,13 +108,14 @@ type partitioningTest struct {
 	}
 }
 
-type repartitioningTest struct {
-	index    string
-	old, new partitioningTest
-}
-
 // parse fills in the various fields of `partitioningTest.parsed`.
-func (pt *partitioningTest) parse() error {
+// If useLeaseholderPrefs is true then each constraint will be interpreted
+// as a lease preference, and a new constraint will be added which constrains
+// replicas to a zone whose dc value matches the node value in the constraint.
+//
+// For example, if the constraint is `n1` and we're using lease preferences
+// we'll parse the lease preference as `n1` and the constraint as `zone=dc1`.
+func (pt *partitioningTest) parse(useLeaseholderPrefs bool) error {
 	if pt.parsed.parsed {
 		return nil
 	}
@@ -177,27 +184,45 @@ func (pt *partitioningTest) parse() error {
 			return errors.Wrapf(err, "could not find index %s", indexName)
 		}
 		subzone.IndexID = uint32(idxDesc.ID)
+		var leasePreferences string
+		if useLeaseholderPrefs {
+			leasePreferences = constraints
+			constraints = strings.Replace(constraints, "n", "zone=dc", -1)
+		}
+
 		if len(constraints) > 0 {
 			if subzone.PartitionName == "" {
-				fmt.Fprintf(&zoneConfigStmts,
-					`ALTER INDEX %s@%s CONFIGURE ZONE USING constraints = '[%s]';`,
-					pt.parsed.tableName, idxDesc.Name, constraints,
-				)
+				_, _ = fmt.Fprintf(&zoneConfigStmts,
+					`ALTER INDEX %s@%s CONFIGURE ZONE USING constraints = '[%s]'`,
+					pt.parsed.tableName, idxDesc.Name, constraints)
 			} else {
-				fmt.Fprintf(&zoneConfigStmts,
-					`ALTER PARTITION %s OF INDEX %s@%s CONFIGURE ZONE USING constraints = '[%s]';`,
-					subzone.PartitionName, pt.parsed.tableName, idxDesc.Name, constraints,
-				)
+				_, _ = fmt.Fprintf(&zoneConfigStmts,
+					`ALTER PARTITION %s OF INDEX %s@%s CONFIGURE ZONE USING constraints = '[%s]'`,
+					subzone.PartitionName, pt.parsed.tableName, idxDesc.Name, constraints)
 			}
-		}
+			if useLeaseholderPrefs {
+				_, _ = fmt.Fprintf(&zoneConfigStmts, ", lease_preferences = '[[%q]]'", leasePreferences)
+			}
+			_, _ = fmt.Fprintf(&zoneConfigStmts, ";\n")
 
-		var parsedConstraints config.ConstraintsList
-		if err := yaml.UnmarshalStrict([]byte("["+constraints+"]"), &parsedConstraints); err != nil {
-			return errors.Wrapf(err, "parsing constraints: %s", constraints)
+			var parsedConstraints config.ConstraintsList
+			if err := yaml.UnmarshalStrict([]byte("["+constraints+"]"), &parsedConstraints); err != nil {
+				return errors.Wrapf(err, "parsing constraints: %s", constraints)
+			}
+
+			subzone.Config.Constraints = parsedConstraints.Constraints
+			subzone.Config.InheritedConstraints = parsedConstraints.Inherited
+			if useLeaseholderPrefs {
+				var parsedLeasePreferences config.ConstraintsList
+				if err := yaml.UnmarshalStrict([]byte("["+leasePreferences+"]"), &parsedLeasePreferences); err != nil {
+					return errors.Wrapf(err, "parsing lease preferences: %s", leasePreferences)
+				}
+				for _, c := range parsedLeasePreferences.Constraints {
+					subzone.Config.LeasePreferences = append(subzone.Config.LeasePreferences,
+						config.LeasePreference{Constraints: c.Constraints})
+				}
+				subzone.Config.InheritedLeasePreferences = parsedLeasePreferences.Inherited
+			}
 		}
-		subzone.Config.Constraints = parsedConstraints.Constraints
-		subzone.Config.InheritedConstraints = parsedConstraints.Inherited
-
 		pt.parsed.subzones = append(pt.parsed.subzones, subzone)
 	}
 	pt.parsed.zoneConfigStmts = zoneConfigStmts.String()
@@ -897,179 +922,6 @@ func allPartitioningTests(rng *rand.Rand) []partitioningTest {
 	return tests
 }
 
-func allRepartitioningTests(partitioningTests []partitioningTest) ([]repartitioningTest, error) {
-	tests := []repartitioningTest{
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `unpartitioned`},
-			new:   partitioningTest{name: `unpartitioned`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `unpartitioned`},
-			new:   partitioningTest{name: `single col list partitioning`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `unpartitioned`},
-			new:   partitioningTest{name: `single col list partitioning - DEFAULT`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `unpartitioned`},
-			new:   partitioningTest{name: `single col range partitioning`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `unpartitioned`},
-			new:   partitioningTest{name: `single col range partitioning - MAXVALUE`},
-		},
-
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `single col list partitioning`},
-			new:   partitioningTest{name: `single col list partitioning - DEFAULT`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `single col list partitioning - DEFAULT`},
-			new:   partitioningTest{name: `single col list partitioning`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `multi col list partitioning`},
-			new:   partitioningTest{name: `multi col list partitioning - DEFAULT`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `multi col list partitioning - DEFAULT`},
-			new:   partitioningTest{name: `multi col list partitioning`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `multi col list partitioning - DEFAULT`},
-			new:   partitioningTest{name: `multi col list partitioning - DEFAULT DEFAULT`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `multi col list partitioning - DEFAULT DEFAULT`},
-			new:   partitioningTest{name: `multi col list partitioning - DEFAULT`},
-		},
-
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `single col range partitioning`},
-			new:   partitioningTest{name: `single col range partitioning - MAXVALUE`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `single col range partitioning - MAXVALUE`},
-			new:   partitioningTest{name: `single col range partitioning`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `multi col range partitioning`},
-			new:   partitioningTest{name: `multi col range partitioning - MAXVALUE`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `multi col range partitioning - MAXVALUE`},
-			new:   partitioningTest{name: `multi col range partitioning`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `multi col range partitioning - MAXVALUE`},
-			new:   partitioningTest{name: `multi col range partitioning - MAXVALUE MAXVALUE`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `multi col range partitioning - MAXVALUE MAXVALUE`},
-			new:   partitioningTest{name: `multi col range partitioning - MAXVALUE`},
-		},
-
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `single col list partitioning`},
-			new:   partitioningTest{name: `single col range partitioning`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `single col range partitioning`},
-			new:   partitioningTest{name: `single col list partitioning`},
-		},
-
-		// TODO(dan): Once repartitioning is fully implemented, these tests also
-		// need to pass with no ccl code.
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `single col list partitioning`},
-			new:   partitioningTest{name: `unpartitioned`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `single col list partitioning - DEFAULT`},
-			new:   partitioningTest{name: `unpartitioned`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `single col range partitioning`},
-			new:   partitioningTest{name: `unpartitioned`},
-		},
-		{
-			index: `primary`,
-			old:   partitioningTest{name: `single col range partitioning - MAXVALUE`},
-			new:   partitioningTest{name: `unpartitioned`},
-		},
-
-		{
-			index: `b_idx`,
-			old:   partitioningTest{name: `secondary index - unpartitioned`},
-			new:   partitioningTest{name: `secondary index - list partitioning`},
-		},
-		{
-			index: `b_idx`,
-			old:   partitioningTest{name: `secondary index - list partitioning`},
-			new:   partitioningTest{name: `secondary index - unpartitioned`},
-		},
-		{
-			index: `b_idx`,
-			old:   partitioningTest{name: `secondary index - list partitioning`},
-			new:   partitioningTest{name: `secondary index - list partitioning - DEFAULT`},
-		},
-		{
-			index: `b_idx`,
-			old:   partitioningTest{name: `secondary index - list partitioning - DEFAULT`},
-			new:   partitioningTest{name: `secondary index - list partitioning`},
-		},
-	}
-
-	partitioningTestsByName := make(map[string]partitioningTest, len(partitioningTests))
-	for _, partitioningTest := range partitioningTests {
-		partitioningTestsByName[partitioningTest.name] = partitioningTest
-	}
-	for i := range tests {
-		t, ok := partitioningTestsByName[tests[i].old.name]
-		if !ok {
-			return nil, errors.Errorf("unknown partitioning test: %s", tests[i].old.name)
-		}
-		tests[i].old = t
-		if err := tests[i].old.parse(); err != nil {
-			return nil, err
-		}
-
-		t, ok = partitioningTestsByName[tests[i].new.name]
-		if !ok {
-			return nil, errors.Errorf("unknown partitioning test: %s", tests[i].new.name)
-		}
-		tests[i].new = t
-		if err := tests[i].new.parse(); err != nil {
-			return nil, err
-		}
-	}
-
-	return tests, nil
-}
-
 func verifyScansOnNode(
 	ctx context.Context, t *testing.T, db *gosql.DB, query string, node string,
 ) error {
@@ -1095,7 +947,8 @@ func verifyScansOnNode(
 		}
 		traceLines = append(traceLines, traceLine.String)
 		if strings.Contains(traceLine.String, "read completed") {
-			if strings.Contains(traceLine.String, "SystemCon") {
+			if strings.Contains(traceLine.String, "SystemCon") ||
+				strings.Contains(traceLine.String, "Min-") {
 				// Ignore trace lines for the system config range (abbreviated as
 				// "SystemCon" in pretty printing of the range descriptor). A read might
 				// be performed to the system config range to update the table lease.
@@ -1119,37 +972,68 @@ func verifyScansOnNode(
 	return nil
 }
 
+type partitioningTestClusterArgs struct {
+	numDCs, nodesPerDC int
+	numReplicas        int
+}
+
+func (a partitioningTestClusterArgs) String() string {
+	return fmt.Sprintf("nodes=%d/dcs=%d", a.numNodes(), a.numDCs)
+}
+
+func (a *partitioningTestClusterArgs) numNodes() int {
+	return a.numDCs * a.nodesPerDC
+}
+
 func setupPartitioningTestCluster(
-	ctx context.Context, t testing.TB,
+	ctx context.Context, t testing.TB, args partitioningTestClusterArgs,
 ) (*gosql.DB, *sqlutils.SQLRunner, func()) {
 	cfg := config.DefaultZoneConfig()
-	cfg.NumReplicas = proto.Int32(1)
+	cfg.NumReplicas = proto.Int32(int32(args.numReplicas))
 
-	tsArgs := func(attr string) base.TestServerArgs {
+	tsArgs := func(attr, locality string) base.TestServerArgs {
+		storeKnobs := &storage.StoreTestingKnobs{
+			// Disable LBS because when the scan is happening at the rate it's happening
+			// below, it's possible that one of the system ranges trigger a split.
+			DisableLoadBasedSplitting: true,
+		}
+		serverKnobs := &server.TestingKnobs{
+			DefaultZoneConfigOverride: &cfg,
+		}
 		return base.TestServerArgs{
 			Knobs: base.TestingKnobs{
-				Store: &storage.StoreTestingKnobs{
-					// Disable LBS because when the scan is happening at the rate it's happening
-					// below, it's possible that one of the system ranges trigger a split.
-					DisableLoadBasedSplitting: true,
-				},
-				Server: &server.TestingKnobs{
-					DefaultZoneConfigOverride: &cfg,
-				},
+				Store:  storeKnobs,
+				Server: serverKnobs,
 			},
 			ScanInterval: 100 * time.Millisecond,
 			StoreSpecs: []base.StoreSpec{
 				{InMemory: true, Attributes: roachpb.Attributes{Attrs: []string{attr}}},
 			},
 			UseDatabase: "data",
+			Locality: roachpb.Locality{
+				Tiers: []roachpb.Tier{
+					{
+						Key:   "zone",
+						Value: locality,
+					},
+				},
+			},
 		}
 	}
+	numNodes := args.numDCs * args.nodesPerDC
+	tcArgs := func(numNodes int) base.TestClusterArgs {
+		serverArgs := make(map[int]base.TestServerArgs)
+		for i := 0; i < numNodes; i++ {
+			serverArgs[i] = tsArgs(
+				"n"+strconv.Itoa(i+1),
+				"dc"+strconv.Itoa((i%args.numDCs)+1),
+			)
+		}
+		return base.TestClusterArgs{
+			ServerArgsPerNode: serverArgs,
+		}
+	}
-	tcArgs := base.TestClusterArgs{ServerArgsPerNode: map[int]base.TestServerArgs{
-		0: tsArgs("n1"),
-		1: tsArgs("n2"),
-		2: tsArgs("n3"),
-	}}
-	tc := testcluster.StartTestCluster(t, 3, tcArgs)
+	tc := testcluster.StartTestCluster(t, numNodes, tcArgs(numNodes))
 	sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
 	sqlDB.Exec(t, `CREATE DATABASE data`)
@@ -1179,7 +1063,11 @@ func TestInitialPartitioning(t *testing.T) {
 	testCases := allPartitioningTests(rng)
 
 	ctx := context.Background()
-	db, sqlDB, cleanup := setupPartitioningTestCluster(ctx, t)
+	db, sqlDB, cleanup := setupPartitioningTestCluster(ctx, t, partitioningTestClusterArgs{
+		numDCs:      3,
+		nodesPerDC:  1,
+		numReplicas: 1,
+	})
 	defer cleanup()
 
 	for _, test := range testCases {
@@ -1187,7 +1075,7 @@ func TestInitialPartitioning(t *testing.T) {
 			continue
 		}
 		t.Run(test.name, func(t *testing.T) {
-			if err := test.parse(); err != nil {
+			if err := test.parse(false /* useLeaseholderPrefs */); err != nil {
 				t.Fatalf("%+v", err)
 			}
 			sqlDB.Exec(t, test.parsed.createStmt)
@@ -1216,7 +1104,7 @@ func TestSelectPartitionExprs(t *testing.T) {
 			PARTITION pdd VALUES IN ((DEFAULT, DEFAULT))
 	)`,
 	}
-	if err := testData.parse(); err != nil {
+	if err := testData.parse(false /* useLeaseholderPrefs */); err != nil {
 		t.Fatalf("%+v", err)
 	}
@@ -1273,6 +1161,9 @@ func TestSelectPartitionExprs(t *testing.T) {
 	})
 }
 
+var numRepartitioningTests = flag.Int("num-repartitioning-tests", 10,
+	"run this many random repartitioning tests, set non-positive to run all")
+
 func TestRepartitioning(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 
@@ -1288,78 +1179,311 @@ func TestRepartitioning(t *testing.T) {
 		t.Fatalf("%+v", err)
 	}
 
+	// NB: These tests take quite a while to run.
+	// We want to ensure that the various configurations don't rot but we also
+	// don't always want to run all of them as it would take too long.
+	// To deal with that we pick a random sampling of all of the
+	// configurations to run unless a special flag is passed to this test.
+	// This random sampling should prevent rot.
+	rng.Shuffle(len(testCases), func(i, j int) {
+		testCases[i], testCases[j] = testCases[j], testCases[i]
+	})
+
+	var ran int
+	for _, test := range testCases {
+		if toRun := *numRepartitioningTests; toRun > 0 && ran >= toRun {
+			break
+		}
+		t.Run(test.String(), test.run)
+		if test.ran {
+			ran++
+		}
+	}
+}
+
+type repartitioningTest struct {
+	index         string
+	old, new      partitioningTest
+	clusterConfig partitioningTestClusterArgs
+
+	// ran is used to detect whether this subtest was actually executed or was
+	// skipped (e.g. filtered out by the -test.run flag).
+	ran bool
+}
+
+func (rt *repartitioningTest) String() string {
+	return fmt.Sprintf("%s/%s/%s", rt.clusterConfig, rt.old.name, rt.new.name)
+}
+
+func (rt *repartitioningTest) run(t *testing.T) {
+	defer leaktest.AfterTest(t)()
+	defer func() { rt.ran = true }()
 	ctx := context.Background()
-	db, sqlDB, cleanup := setupPartitioningTestCluster(ctx, t)
+	db, sqlDB, cleanup := setupPartitioningTestCluster(ctx, t, rt.clusterConfig)
 	defer cleanup()
+	sqlDB.Exec(t, `DROP DATABASE IF EXISTS data`)
+	sqlDB.Exec(t, `CREATE DATABASE data`)
+	{
+		sqlDB.Exec(t, rt.old.parsed.createStmt)
+		sqlDB.Exec(t, rt.old.parsed.zoneConfigStmts)
 
-	for _, test := range testCases {
-		t.Run(fmt.Sprintf("%s/%s", test.old.name, test.new.name), func(t *testing.T) {
-			sqlDB.Exec(t, `DROP DATABASE IF EXISTS data`)
-			sqlDB.Exec(t, `CREATE DATABASE data`)
+		testutils.SucceedsSoon(t, rt.old.verifyScansFn(ctx, t, db))
+	}
+	{
+		sqlDB.Exec(t, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", rt.old.parsed.tableName, rt.new.parsed.tableName))
 
-			{
-				if err := test.old.parse(); err != nil {
-					t.Fatalf("%+v", err)
-				}
-				sqlDB.Exec(t, test.old.parsed.createStmt)
-				sqlDB.Exec(t, test.old.parsed.zoneConfigStmts)
+		testIndex, _, err := rt.new.parsed.tableDesc.FindIndexByName(rt.index)
+		if err != nil {
+			t.Fatalf("%+v", err)
+		}
 
-				testutils.SucceedsSoon(t, test.old.verifyScansFn(ctx, t, db))
-			}
+		var repartition bytes.Buffer
+		if testIndex.ID == rt.new.parsed.tableDesc.PrimaryIndex.ID {
+			fmt.Fprintf(&repartition, `ALTER TABLE %s `, rt.new.parsed.tableName)
+		} else {
+			fmt.Fprintf(&repartition, `ALTER INDEX %s@%s `, rt.new.parsed.tableName, testIndex.Name)
+		}
+		if testIndex.Partitioning.NumColumns == 0 {
+			repartition.WriteString(`PARTITION BY NOTHING`)
+		} else {
+			if err := sql.ShowCreatePartitioning(
+				&sqlbase.DatumAlloc{}, rt.new.parsed.tableDesc, testIndex,
+				&testIndex.Partitioning, &repartition, 0 /* indent */, 0, /* colOffset */
+			); err != nil {
+				t.Fatalf("%+v", err)
+			}
+		}
+		sqlDB.Exec(t, repartition.String())
 
-			{
-				if err := test.new.parse(); err != nil {
-					t.Fatalf("%+v", err)
-				}
-				sqlDB.Exec(t, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", test.old.parsed.tableName, test.new.parsed.tableName))
+		// Verify that repartitioning removes zone configs for partitions that
+		// have been removed.
+		newPartitionNames := map[string]struct{}{}
+		for _, name := range rt.new.parsed.tableDesc.PartitionNames() {
+			newPartitionNames[name] = struct{}{}
+		}
+		for _, row := range sqlDB.QueryStr(
+			t, "SELECT partition_name FROM crdb_internal.zones WHERE partition_name IS NOT NULL") {
+			partitionName := row[0]
+			if _, ok := newPartitionNames[partitionName]; !ok {
+				t.Errorf("zone config for removed partition %q exists after repartitioning", partitionName)
+			}
+		}
-			testIndex, _, err := test.new.parsed.tableDesc.FindIndexByName(test.index)
-			if err != nil {
-				t.Fatalf("%+v", err)
-			}
+
+		// NB: Not all old zone configurations are removed. This statement will
+		// overwrite any with the same name and the repartitioning removes any
+		// for partitions that no longer exist, but there could still be some
+		// sitting around (e.g., when a repartitioning preserves a partition but
+		// does not apply a new zone config). This is fine.
+		sqlDB.Exec(t, rt.new.parsed.zoneConfigStmts)
+		testutils.SucceedsSoon(t, rt.new.verifyScansFn(ctx, t, db))
+	}
+}
-
-			var repartition bytes.Buffer
-			if testIndex.ID == test.new.parsed.tableDesc.PrimaryIndex.ID {
-				fmt.Fprintf(&repartition, `ALTER TABLE %s `, test.new.parsed.tableName)
-			} else {
-				fmt.Fprintf(&repartition, `ALTER INDEX %s@%s `, test.new.parsed.tableName, testIndex.Name)
-			}
-			if testIndex.Partitioning.NumColumns == 0 {
-				repartition.WriteString(`PARTITION BY NOTHING`)
-			} else {
-				if err := sql.ShowCreatePartitioning(
-					&sqlbase.DatumAlloc{}, test.new.parsed.tableDesc, testIndex,
-					&testIndex.Partitioning, &repartition, 0 /* indent */, 0, /* colOffset */
-				); err != nil {
-					t.Fatalf("%+v", err)
-				}
-			}
-			sqlDB.Exec(t, repartition.String())
+
+func allRepartitioningTests(
+	partitioningTests []partitioningTest,
+) (tests []repartitioningTest, _ error) {
+	baseTests := []repartitioningTest{
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `unpartitioned`},
+			new:   partitioningTest{name: `unpartitioned`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `unpartitioned`},
+			new:   partitioningTest{name: `single col list partitioning`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `unpartitioned`},
+			new:   partitioningTest{name: `single col list partitioning - DEFAULT`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `unpartitioned`},
+			new:   partitioningTest{name: `single col range partitioning`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `unpartitioned`},
+			new:   partitioningTest{name: `single col range partitioning - MAXVALUE`},
+		},
-
-			// Verify that repartitioning removes zone configs for partitions that
-			// have been removed.
-			newPartitionNames := map[string]struct{}{}
-			for _, name := range test.new.parsed.tableDesc.PartitionNames() {
-				newPartitionNames[name] = struct{}{}
-			}
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `single col list partitioning`},
+			new:   partitioningTest{name: `single col list partitioning - DEFAULT`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `single col list partitioning - DEFAULT`},
+			new:   partitioningTest{name: `single col list partitioning`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `multi col list partitioning`},
+			new:   partitioningTest{name: `multi col list partitioning - DEFAULT`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `multi col list partitioning - DEFAULT`},
+			new:   partitioningTest{name: `multi col list partitioning`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `multi col list partitioning - DEFAULT`},
+			new:   partitioningTest{name: `multi col list partitioning - DEFAULT DEFAULT`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `multi col list partitioning - DEFAULT DEFAULT`},
+			new:   partitioningTest{name: `multi col list partitioning - DEFAULT`},
+		},
+
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `single col range partitioning`},
+			new:   partitioningTest{name: `single col range partitioning - MAXVALUE`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `single col range partitioning - MAXVALUE`},
+			new:   partitioningTest{name: `single col range partitioning`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `multi col range partitioning`},
+			new:   partitioningTest{name: `multi col range partitioning - MAXVALUE`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `multi col range partitioning - MAXVALUE`},
+			new:   partitioningTest{name: `multi col range partitioning`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `multi col range partitioning - MAXVALUE`},
+			new:   partitioningTest{name: `multi col range partitioning - MAXVALUE MAXVALUE`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `multi col range partitioning - MAXVALUE MAXVALUE`},
+			new:   partitioningTest{name: `multi col range partitioning - MAXVALUE`},
+		},
+
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `single col list partitioning`},
+			new:   partitioningTest{name: `single col range partitioning`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `single col range partitioning`},
+			new:   partitioningTest{name: `single col list partitioning`},
+		},
+
+		// TODO(dan): Once repartitioning is fully implemented, these tests also
+		// need to pass with no ccl code.
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `single col list partitioning`},
+			new:   partitioningTest{name: `unpartitioned`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `single col list partitioning - DEFAULT`},
+			new:   partitioningTest{name: `unpartitioned`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `single col range partitioning`},
+			new:   partitioningTest{name: `unpartitioned`},
+		},
+		{
+			index: `primary`,
+			old:   partitioningTest{name: `single col range partitioning - MAXVALUE`},
+			new:   partitioningTest{name: `unpartitioned`},
+		},
+
+		{
+			index: `b_idx`,
+			old:   partitioningTest{name: `secondary index - unpartitioned`},
+			new:   partitioningTest{name: `secondary index - list partitioning`},
+		},
+		{
+			index: `b_idx`,
+			old:   partitioningTest{name: `secondary index - list partitioning`},
+			new:   partitioningTest{name: `secondary index - unpartitioned`},
+		},
+		{
+			index: `b_idx`,
+			old:   partitioningTest{name: `secondary index - list partitioning`},
+			new:   partitioningTest{name: `secondary index - list partitioning - DEFAULT`},
+		},
+		{
+			index: `b_idx`,
+			old:   partitioningTest{name: `secondary index - list partitioning - DEFAULT`},
+			new:   partitioningTest{name: `secondary index - list partitioning`},
+		},
+	}
+
+	partitioningTestsByName := make(map[string]partitioningTest, len(partitioningTests))
+	for _, partitioningTest := range partitioningTests {
+		partitioningTestsByName[partitioningTest.name] = partitioningTest
+	}
+
+	// All of the tests rely on there being exactly 3 DCs or, in the case where
+	// useLeaseholderPrefs is false, 3 nodes.
+	const numDCs = 3
+
+	setupRepartitioningTest := func(
+		baseTest repartitioningTest,
+		useLeaseholderPrefs bool, // when false, combinations with nodesPerDC != 1 are skipped
+		nodesPerDC int,
+	) (repartitioningTest, error) {
+		t := baseTest
+		t.clusterConfig.numReplicas = nodesPerDC
+		t.clusterConfig.nodesPerDC = nodesPerDC
+		t.clusterConfig.numDCs = numDCs
+		pt, ok := partitioningTestsByName[t.old.name]
+		if !ok {
+			return repartitioningTest{}, errors.Errorf("unknown partitioning test: %s", t.old.name)
+		}
+		t.old = pt
+		if err := t.old.parse(useLeaseholderPrefs); err != nil {
+			return repartitioningTest{}, err
+		}
+
+		pt, ok = partitioningTestsByName[t.new.name]
+		if !ok {
+			return repartitioningTest{}, errors.Errorf("unknown partitioning test: %s", t.new.name)
+		}
+		t.new = pt
+		if err := t.new.parse(useLeaseholderPrefs); err != nil {
+			return repartitioningTest{}, err
+		}
+		return t, nil
+	}
+
+	for i := range baseTests {
+		for _, useLeaseholderPrefs := range []bool{true, false} {
+			for _, nodesPerDC := range []int{1, 3, 5} {
+				// It is only sane to not use leaseholder preferences if there is a
+				// single replica.
+				if !useLeaseholderPrefs && nodesPerDC != 1 {
+					continue
+				}
-			for _, row := range sqlDB.QueryStr(
-				t, "SELECT partition_name FROM crdb_internal.zones WHERE partition_name IS NOT NULL") {
-				partitionName := row[0]
-				if _, ok := newPartitionNames[partitionName]; !ok {
-					t.Errorf("zone config for removed partition %q exists after repartitioning", partitionName)
-				}
-			}
+				t, err := setupRepartitioningTest(baseTests[i], useLeaseholderPrefs, nodesPerDC)
+				if err != nil {
+					return nil, err
+				}
-
-			// NB: Not all old zone configurations are removed. This statement will
-			// overwrite any with the same name and the repartitioning removes any
-			// for partitions that no longer exist, but there could still be some
-			// sitting around (e.g., when a repartitioning preserves a partition but
-			// does not apply a new zone config). This is fine.
-			sqlDB.Exec(t, test.new.parsed.zoneConfigStmts)
-			testutils.SucceedsSoon(t, test.new.verifyScansFn(ctx, t, db))
+				tests = append(tests, t)
-		})
+			}
+		}
 	}
+
+	return tests, nil
 }
 
 func TestRemovePartitioningExpiredLicense(t *testing.T) {
diff --git a/pkg/ccl/partitionccl/zone_test.go b/pkg/ccl/partitionccl/zone_test.go
index 047df424f8cb..e5446db575c6 100644
--- a/pkg/ccl/partitionccl/zone_test.go
+++ b/pkg/ccl/partitionccl/zone_test.go
@@ -283,7 +283,7 @@ func TestGenerateSubzoneSpans(t *testing.T) {
 			continue
 		}
 		t.Run(test.name, func(t *testing.T) {
-			if err := test.parse(); err != nil {
+			if err := test.parse(false /* useLeaseholderPrefs */); err != nil {
 				t.Fatalf("%+v", err)
 			}
 			clusterID := uuid.MakeV4()
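A note for reviewers on the shorthand translation in `parse`: when `useLeaseholderPrefs` is set, the node shorthand (e.g. `+n1`) is kept as the lease preference, and the replica constraint is derived by substituting `n` with `zone=dc`, so `+n1` pins replicas to `zone=dc1` (whose only node is `n1` when `nodesPerDC` is 1, given how `setupPartitioningTestCluster` assigns localities). The standalone Go sketch below mirrors that translation outside the test harness; `shorthandToZoneConfigSQL` is a hypothetical helper for illustration only, and the real code additionally unmarshals both strings into `config.ConstraintsList` to populate `parsed.subzones`.

```go
package main

import (
	"fmt"
	"strings"
)

// shorthandToZoneConfigSQL mirrors the translation in partitioningTest.parse:
// with leaseholder preferences enabled, the node shorthand (e.g. "+n1") stays
// as the lease preference, while the replica constraint is rewritten to the
// matching DC ("+zone=dc1"). Hypothetical helper, not part of the patch.
func shorthandToZoneConfigSQL(table, index, constraints string, useLeaseholderPrefs bool) string {
	var b strings.Builder
	leasePreferences := constraints
	if useLeaseholderPrefs {
		// Same blunt substitution as the patch: every "n" becomes "zone=dc".
		constraints = strings.Replace(constraints, "n", "zone=dc", -1)
	}
	fmt.Fprintf(&b, "ALTER INDEX %s@%s CONFIGURE ZONE USING constraints = '[%s]'", table, index, constraints)
	if useLeaseholderPrefs {
		fmt.Fprintf(&b, ", lease_preferences = '[[%q]]'", leasePreferences)
	}
	b.WriteString(";")
	return b.String()
}

func main() {
	// With prefs: data is pinned to dc1, the leaseholder preference stays on n1.
	fmt.Println(shorthandToZoneConfigSQL("t", "primary", "+n1", true))
	// ALTER INDEX t@primary CONFIGURE ZONE USING constraints = '[+zone=dc1]', lease_preferences = '[["+n1"]]';

	// Without prefs: the shorthand is the replica constraint, as before this patch.
	fmt.Println(shorthandToZoneConfigSQL("t", "primary", "+n1", false))
	// ALTER INDEX t@primary CONFIGURE ZONE USING constraints = '[+n1]';
}
```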