From 18f8836fdb2dd3d7ae81fa992aaaf5425b754db2 Mon Sep 17 00:00:00 2001 From: Nathan VanBenschoten Date: Thu, 30 Apr 2020 14:25:44 -0400 Subject: [PATCH 1/4] kv: declare write access to AbortSpan on all aborting EndTxn reqs Fixes #43707. Fixes #48046. Fixes #48189. Part of the change made by #42765 was to clear AbortSpan entries on non-poisoning, aborting EndTxn requests. Specifically, this change was made in 1328787. The change forgot to update the corresponding span declaration logic to reflect the fact that we were now writing to the AbortSpan in cases where we previously weren't. This was triggering an assertion in race builds that tried to catch this kind of undeclared span access. The assertion failure was very rare because it required the following conditions to all be met: 1. running a test with the race detector enabled 2. a txn (A) must have been aborted by another txn (B) 3. txn B must have cleared an intent on txn A's transaction record range 4. txn A must have noticed and issued a non-poisoning EndTxn(ABORT) We should backport this when we get a chance (once v20.1.0 has stabilized), but I don't expect that this could actually cause any issues. The AbortSpan update was strictly a matter of performance and we should never be racing with another request that is trying to read the same AbortSpan entry. 
--- pkg/kv/kvserver/batcheval/cmd_end_transaction.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/kv/kvserver/batcheval/cmd_end_transaction.go b/pkg/kv/kvserver/batcheval/cmd_end_transaction.go index 77f06a125469..05ce64a1a20e 100644 --- a/pkg/kv/kvserver/batcheval/cmd_end_transaction.go +++ b/pkg/kv/kvserver/batcheval/cmd_end_transaction.go @@ -69,7 +69,12 @@ func declareKeysEndTxn( header.Txn.AssertInitialized(context.TODO()) minTxnTS = header.Txn.MinTimestamp abortSpanAccess := spanset.SpanReadOnly - if !et.Commit && et.Poison { + if !et.Commit { + // Rollback EndTxn requests may write to the abort span, either if + // their Poison flag is set, in which case they will add an abort + // span entry, or if their Poison flag is not set and an abort span + // entry already exists on this Range, in which case they will clear + // that entry. abortSpanAccess = spanset.SpanReadWrite } latchSpans.AddNonMVCC(abortSpanAccess, roachpb.Span{ From 233377899aacaa2f7d7a41de5b6e65bc5a4d1f15 Mon Sep 17 00:00:00 2001 From: Nathan VanBenschoten Date: Wed, 29 Apr 2020 21:45:53 -0400 Subject: [PATCH 2/4] sql: delete scanNode.isDeleteSource This has been unused for a while, possibly since Delete statements were pulled into the optimizer. --- pkg/sql/delete_range.go | 3 +-- pkg/sql/distsql_plan_stats.go | 2 +- pkg/sql/scan.go | 3 --- pkg/sql/scrub_physical.go | 2 +- pkg/sql/span/span_builder.go | 4 ++-- 5 files changed, 5 insertions(+), 9 deletions(-) diff --git a/pkg/sql/delete_range.go b/pkg/sql/delete_range.go index 28277891e5da..f00594e32652 100644 --- a/pkg/sql/delete_range.go +++ b/pkg/sql/delete_range.go @@ -92,8 +92,7 @@ func maybeCreateDeleteFastNode( } // Check whether the source plan is "simple": that it contains no remaining - // filtering, limiting, sorting, etc. Note that this logic must be kept in - // sync with the logic for setting scanNode.isDeleteSource (see doExpandPlan.) + // filtering, limiting, sorting, etc. 
// TODO(dt): We could probably be smarter when presented with an // index-join, but this goes away anyway once we push-down more of // SQL. diff --git a/pkg/sql/distsql_plan_stats.go b/pkg/sql/distsql_plan_stats.go index de54c6bb2312..ce67a7b561a6 100644 --- a/pkg/sql/distsql_plan_stats.go +++ b/pkg/sql/distsql_plan_stats.go @@ -83,7 +83,7 @@ func (dsp *DistSQLPlanner) createStatsPlan( return PhysicalPlan{}, err } sb := span.MakeBuilder(desc.TableDesc(), scan.index) - scan.spans, err = sb.UnconstrainedSpans(scan.isDeleteSource) + scan.spans, err = sb.UnconstrainedSpans() if err != nil { return PhysicalPlan{}, err } diff --git a/pkg/sql/scan.go b/pkg/sql/scan.go index 398796b29852..9165bb17f98e 100644 --- a/pkg/sql/scan.go +++ b/pkg/sql/scan.go @@ -109,9 +109,6 @@ type scanNode struct { // scan is guaranteed to return. maxResults uint64 - // Indicates if this scan is the source for a delete node. - isDeleteSource bool - // estimatedRowCount is the estimated number of rows that this scanNode will // output. When there are no statistics to make the estimation, it will be // set to zero. diff --git a/pkg/sql/scrub_physical.go b/pkg/sql/scrub_physical.go index 20b3fd0ea012..650a1796f3a3 100644 --- a/pkg/sql/scrub_physical.go +++ b/pkg/sql/scrub_physical.go @@ -113,7 +113,7 @@ func (o *physicalCheckOperation) Start(params runParams) error { } scan.index = scan.specifiedIndex sb := span.MakeBuilder(o.tableDesc.TableDesc(), o.indexDesc) - scan.spans, err = sb.UnconstrainedSpans(false /* forDelete */) + scan.spans, err = sb.UnconstrainedSpans() if err != nil { return err } diff --git a/pkg/sql/span/span_builder.go b/pkg/sql/span/span_builder.go index 132e22dac006..1aa3054d7f9a 100644 --- a/pkg/sql/span/span_builder.go +++ b/pkg/sql/span/span_builder.go @@ -219,8 +219,8 @@ func (s *Builder) SpansFromConstraint( // UnconstrainedSpans returns the full span corresponding to the Builder's // table and index. 
-func (s *Builder) UnconstrainedSpans(forDelete bool) (roachpb.Spans, error) { - return s.SpansFromConstraint(nil, exec.TableColumnOrdinalSet{}, forDelete) +func (s *Builder) UnconstrainedSpans() (roachpb.Spans, error) { + return s.SpansFromConstraint(nil, exec.TableColumnOrdinalSet{}, false /* forDelete */) } // appendSpansFromConstraintSpan converts a constraint.Span to one or more From f1fae7ef366bcc3ec423303612cd92ad578125a8 Mon Sep 17 00:00:00 2001 From: Nathan VanBenschoten Date: Wed, 29 Apr 2020 21:55:17 -0400 Subject: [PATCH 3/4] sql: pull full scan determination into execFactory, out of DistSQLPlanner This removes the only instance where the DistSQLPlanner was encoding keys. It allows us to avoid giving it a SQL codec in the next commit. --- pkg/sql/distsql_physical_planner.go | 2 +- pkg/sql/distsql_plan_stats.go | 1 + pkg/sql/opt_exec_factory.go | 1 + pkg/sql/scan.go | 4 ++++ pkg/sql/scrub_physical.go | 1 + 5 files changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index 5a77278ff91e..a87db7b8ac28 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -446,7 +446,7 @@ func (dsp *DistSQLPlanner) checkSupportForNode(node planNode) (distRecommendatio rec = rec.compose(shouldDistribute) } // Check if we are doing a full scan. 
- if len(n.spans) == 1 && n.spans[0].EqualValue(n.desc.IndexSpan(n.index.ID)) { + if n.isFull { rec = rec.compose(shouldDistribute) } return rec, nil diff --git a/pkg/sql/distsql_plan_stats.go b/pkg/sql/distsql_plan_stats.go index ce67a7b561a6..a71562df0f99 100644 --- a/pkg/sql/distsql_plan_stats.go +++ b/pkg/sql/distsql_plan_stats.go @@ -87,6 +87,7 @@ func (dsp *DistSQLPlanner) createStatsPlan( if err != nil { return PhysicalPlan{}, err } + scan.isFull = true p, err := dsp.createTableReaders(planCtx, &scan, nil /* overrideResultColumns */) if err != nil { diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index 246ee174f39b..f7417b8e6237 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -120,6 +120,7 @@ func (ef *execFactory) ConstructScan( if err != nil { return nil, err } + scan.isFull = len(scan.spans) == 1 && scan.spans[0].EqualValue(scan.desc.IndexSpan(scan.index.ID)) for i := range reqOrdering { if reqOrdering[i].ColIdx >= len(colCfg.wantedColumns) { return nil, errors.Errorf("invalid reqOrdering: %v", reqOrdering) diff --git a/pkg/sql/scan.go b/pkg/sql/scan.go index 9165bb17f98e..aeadd2385dfb 100644 --- a/pkg/sql/scan.go +++ b/pkg/sql/scan.go @@ -99,6 +99,10 @@ type scanNode struct { // Should be set to true if sqlbase.ParallelScans is true. parallelScansEnabled bool + // Is this a full scan of an index? + isFull bool + + // Is this a scan of a secondary index? isSecondaryIndex bool // Indicates if this scanNode will do a physical data check. 
This is diff --git a/pkg/sql/scrub_physical.go b/pkg/sql/scrub_physical.go index 650a1796f3a3..02a66ae89a8e 100644 --- a/pkg/sql/scrub_physical.go +++ b/pkg/sql/scrub_physical.go @@ -117,6 +117,7 @@ func (o *physicalCheckOperation) Start(params runParams) error { if err != nil { return err } + scan.isFull = true planCtx := params.extendedEvalCtx.DistSQLPlanner.NewPlanningCtx(ctx, params.extendedEvalCtx, params.p.txn) physPlan, err := params.extendedEvalCtx.DistSQLPlanner.createScrubPhysicalCheck( From 164f82bf2b2a3a2bb2f5e95289472a2800893f61 Mon Sep 17 00:00:00 2001 From: Nathan VanBenschoten Date: Wed, 29 Apr 2020 21:42:47 -0400 Subject: [PATCH 4/4] sql: inject tenant ID in sqlServerArgs, pass through ExecutorConfig Fixes #47903. Also known as "the grand plumbing", this commit replaces a few instances of `TODOSQLCodec` in `pkg/sql/sqlbase/index_encoding.go` and watches the house of cards fall apart. It then glues the world back together, this time using a properly injected tenant-bound SQLCodec to encode and decode all SQL table keys. A tenant ID field is added to `sqlServerArgs`. This is used to construct a tenant-bound `keys.SQLCodec` during server creation. This codec morally lives on the `sql.ExecutorConfig`. In practice, it is also copied onto `tree.EvalContext` and `execinfra.ServerConfig` to help carry it around. SQL code is adapted to use this codec whenever it needs to encode or decode keys. If all tests pass after this refactor, there is a good chance it got things right. This is because any use of an uninitialized SQLCodec will panic immediately when the codec is first used. This was helpful in ensuring that it was properly plumbed everywhere. 
--- pkg/ccl/backupccl/backup_planning.go | 8 ++-- pkg/ccl/backupccl/backup_test.go | 2 +- pkg/ccl/backupccl/restore_job.go | 2 +- pkg/ccl/backupccl/show_test.go | 5 +- pkg/ccl/changefeedccl/bench_test.go | 6 ++- pkg/ccl/changefeedccl/changefeed.go | 3 +- pkg/ccl/changefeedccl/changefeed_dist.go | 11 +++-- .../changefeedccl/changefeed_processors.go | 2 +- pkg/ccl/changefeedccl/rowfetcher_cache.go | 9 ++-- pkg/ccl/importccl/import_stmt.go | 14 +++--- pkg/ccl/importccl/load.go | 3 +- pkg/ccl/importccl/read_import_mysql.go | 7 ++- pkg/ccl/importccl/read_import_pgdump.go | 2 +- pkg/ccl/importccl/testutils_test.go | 1 + pkg/ccl/partitionccl/drop_test.go | 3 +- pkg/ccl/partitionccl/partition.go | 9 ++-- pkg/ccl/partitionccl/partition_test.go | 4 +- pkg/ccl/partitionccl/zone_test.go | 2 +- pkg/ccl/storageccl/bench_test.go | 2 +- pkg/ccl/storageccl/key_rewriter_test.go | 7 +-- pkg/keys/spans.go | 3 ++ .../reports/constraint_stats_report_test.go | 4 +- pkg/server/server.go | 3 +- pkg/server/server_sql.go | 18 +++++-- pkg/server/settingsworker.go | 5 +- pkg/server/testserver.go | 3 +- pkg/sql/alter_table.go | 6 ++- pkg/sql/backfill.go | 42 +++++++++++------ pkg/sql/backfill/backfill.go | 19 ++++++-- pkg/sql/colexec/cfetcher.go | 3 +- pkg/sql/colexec/colbatch_scan.go | 8 ++-- pkg/sql/colflow/colbatch_scan_test.go | 5 +- pkg/sql/conn_executor.go | 1 + pkg/sql/conn_executor_internal_test.go | 2 + pkg/sql/crdb_internal.go | 6 +-- pkg/sql/create_sequence.go | 2 +- pkg/sql/create_table.go | 9 ++-- pkg/sql/delete_range.go | 1 + pkg/sql/distsql/server.go | 1 + pkg/sql/distsql_plan_csv.go | 2 +- pkg/sql/distsql_plan_stats.go | 2 +- pkg/sql/drop_index.go | 10 +++- pkg/sql/drop_table.go | 2 +- pkg/sql/drop_test.go | 36 +++++++------- pkg/sql/exec_util.go | 2 + pkg/sql/execinfra/flow_context.go | 6 +++ pkg/sql/execinfra/server_config.go | 15 ++++-- pkg/sql/flowinfra/cluster_test.go | 3 +- pkg/sql/flowinfra/server_test.go | 3 +- pkg/sql/gcjob/index_garbage_collection.go | 11 +++-- 
pkg/sql/gcjob/refresh_statuses.go | 10 ++-- pkg/sql/gcjob/table_garbage_collection.go | 12 +++-- pkg/sql/insert_fast_path.go | 10 ++-- pkg/sql/join_test.go | 3 +- pkg/sql/lease_test.go | 2 +- pkg/sql/opt_catalog.go | 15 ++++-- pkg/sql/opt_exec_factory.go | 24 +++++++--- pkg/sql/partition_utils.go | 14 +++--- pkg/sql/pgwire/testdata/pgtest/notice | 2 +- .../physicalplan/fake_span_resolver_test.go | 2 +- pkg/sql/planner.go | 1 + pkg/sql/relocate.go | 2 +- pkg/sql/revert.go | 3 +- pkg/sql/revert_test.go | 9 ++-- pkg/sql/row/cascader.go | 23 ++++++--- pkg/sql/row/deleter.go | 24 ++++++++-- pkg/sql/row/errors.go | 15 ++++-- pkg/sql/row/fetcher.go | 11 ++++- pkg/sql/row/fetcher_mvcc_test.go | 3 +- pkg/sql/row/fetcher_test.go | 15 +++--- pkg/sql/row/fk_existence_base.go | 5 +- pkg/sql/row/fk_existence_delete.go | 4 +- pkg/sql/row/fk_existence_insert.go | 4 +- pkg/sql/row/fk_existence_update.go | 6 ++- pkg/sql/row/helper.go | 18 +++++-- pkg/sql/row/inserter.go | 6 ++- pkg/sql/row/row_converter.go | 1 + pkg/sql/row/updater.go | 19 +++++--- pkg/sql/rowexec/index_skip_table_reader.go | 1 + .../rowexec/index_skip_table_reader_test.go | 33 ++++++------- pkg/sql/rowexec/indexbackfiller.go | 2 +- pkg/sql/rowexec/indexjoiner.go | 3 +- pkg/sql/rowexec/interleaved_reader_joiner.go | 4 +- .../rowexec/interleaved_reader_joiner_test.go | 25 +++++----- pkg/sql/rowexec/joinreader.go | 4 +- pkg/sql/rowexec/rowfetcher.go | 10 +++- pkg/sql/rowexec/scrub_tablereader.go | 4 +- pkg/sql/rowexec/tablereader.go | 2 +- pkg/sql/rowexec/tablereader_test.go | 13 ++--- pkg/sql/rowexec/zigzagjoiner.go | 11 +++-- pkg/sql/scatter.go | 6 +-- pkg/sql/schema_changer.go | 17 +++++-- pkg/sql/scrub_physical.go | 2 +- pkg/sql/scrub_test.go | 27 ++++++----- pkg/sql/sem/builtins/builtins.go | 4 +- pkg/sql/sem/tree/eval.go | 5 +- pkg/sql/set_zone_config.go | 2 +- pkg/sql/show_create.go | 4 +- pkg/sql/show_create_clauses.go | 9 ++-- pkg/sql/span/span_builder.go | 10 ++-- pkg/sql/span_builder_test.go | 3 +- 
pkg/sql/split.go | 9 ++-- pkg/sql/sqlbase/index_encoding.go | 47 +++++++++++-------- pkg/sql/sqlbase/partition.go | 4 +- pkg/sql/sqlbase/structured.go | 29 +++++++----- pkg/sql/sqlbase/table_test.go | 32 ++++++++----- pkg/sql/sqlbase/testutils.go | 3 +- pkg/sql/sqlbase/utils_test.go | 4 +- pkg/sql/table.go | 2 +- pkg/sql/tablewriter_delete.go | 10 ++-- pkg/sql/tablewriter_upsert_opt.go | 4 +- pkg/sql/truncate.go | 8 +++- pkg/sql/unsplit.go | 2 +- 113 files changed, 616 insertions(+), 337 deletions(-) diff --git a/pkg/ccl/backupccl/backup_planning.go b/pkg/ccl/backupccl/backup_planning.go index dbb937051bc7..a9909ce26f63 100644 --- a/pkg/ccl/backupccl/backup_planning.go +++ b/pkg/ccl/backupccl/backup_planning.go @@ -87,14 +87,14 @@ type tableAndIndex struct { // spansForAllTableIndexes returns non-overlapping spans for every index and // table passed in. They would normally overlap if any of them are interleaved. func spansForAllTableIndexes( - tables []*sqlbase.TableDescriptor, revs []BackupManifest_DescriptorRevision, + codec keys.SQLCodec, tables []*sqlbase.TableDescriptor, revs []BackupManifest_DescriptorRevision, ) []roachpb.Span { added := make(map[tableAndIndex]bool, len(tables)) sstIntervalTree := interval.NewTree(interval.ExclusiveOverlapper) for _, table := range tables { for _, index := range table.AllNonDropIndexes() { - if err := sstIntervalTree.Insert(intervalSpan(table.IndexSpan(index.ID)), false); err != nil { + if err := sstIntervalTree.Insert(intervalSpan(table.IndexSpan(codec, index.ID)), false); err != nil { panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan")) } added[tableAndIndex{tableID: table.ID, indexID: index.ID}] = true @@ -108,7 +108,7 @@ func spansForAllTableIndexes( for _, idx := range tbl.AllNonDropIndexes() { key := tableAndIndex{tableID: tbl.ID, indexID: idx.ID} if !added[key] { - if err := sstIntervalTree.Insert(intervalSpan(tbl.IndexSpan(idx.ID)), false); err != nil { + if err := 
sstIntervalTree.Insert(intervalSpan(tbl.IndexSpan(codec, idx.ID)), false); err != nil { panic(errors.NewAssertionErrorWithWrappedErrf(err, "IndexSpan")) } added[key] = true @@ -528,7 +528,7 @@ func backupPlanHook( } } - spans := spansForAllTableIndexes(tables, revs) + spans := spansForAllTableIndexes(p.ExecCfg().Codec, tables, revs) if len(prevBackups) > 0 { tablesInPrev := make(map[sqlbase.ID]struct{}) diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index f4559f93c860..3b0d72c91eff 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -1046,7 +1046,7 @@ func TestBackupRestoreResume(t *testing.T) { t.Run("backup", func(t *testing.T) { sqlDB := sqlutils.MakeSQLRunner(outerDB.DB) - backupStartKey := backupTableDesc.PrimaryIndexSpan().Key + backupStartKey := backupTableDesc.PrimaryIndexSpan(keys.SystemSQLCodec).Key backupEndKey, err := sqlbase.TestingMakePrimaryIndexKey(backupTableDesc, numAccounts/2) if err != nil { t.Fatal(err) diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index c5e5f1e1d167..45aad5d20e8c 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -904,7 +904,7 @@ func createImportingTables( // We get the spans of the restoring tables _as they appear in the backup_, // that is, in the 'old' keyspace, before we reassign the table IDs. 
- spans := spansForAllTableIndexes(tables, nil) + spans := spansForAllTableIndexes(p.ExecCfg().Codec, tables, nil) log.Eventf(ctx, "starting restore for %d tables", len(tables)) diff --git a/pkg/ccl/backupccl/show_test.go b/pkg/ccl/backupccl/show_test.go index 6a0b3c047e7a..221e6051943a 100644 --- a/pkg/ccl/backupccl/show_test.go +++ b/pkg/ccl/backupccl/show_test.go @@ -15,6 +15,7 @@ import ( "strings" "testing" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -100,8 +101,8 @@ func TestShowBackup(t *testing.T) { details1Desc := sqlbase.GetTableDescriptor(tc.Server(0).DB(), "data", "details1") details2Desc := sqlbase.GetTableDescriptor(tc.Server(0).DB(), "data", "details2") - details1Key := roachpb.Key(sqlbase.MakeIndexKeyPrefix(details1Desc, details1Desc.PrimaryIndex.ID)) - details2Key := roachpb.Key(sqlbase.MakeIndexKeyPrefix(details2Desc, details2Desc.PrimaryIndex.ID)) + details1Key := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, details1Desc, details1Desc.PrimaryIndex.ID)) + details2Key := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, details2Desc, details2Desc.PrimaryIndex.ID)) sqlDB.CheckQueryResults(t, fmt.Sprintf(`SHOW BACKUP RANGES '%s'`, details), [][]string{ {"/Table/56/1", "/Table/56/2", string(details1Key), string(details1Key.PrefixEnd())}, diff --git a/pkg/ccl/changefeedccl/bench_test.go b/pkg/ccl/changefeedccl/bench_test.go index bfeaf857d973..24f821b1826f 100644 --- a/pkg/ccl/changefeedccl/bench_test.go +++ b/pkg/ccl/changefeedccl/bench_test.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/kvfeed" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" 
"github.com/cockroachdb/cockroach/pkg/sql" @@ -184,7 +185,7 @@ func createBenchmarkChangefeed( database, table string, ) (*benchSink, func() error, error) { tableDesc := sqlbase.GetTableDescriptor(s.DB(), database, table) - spans := []roachpb.Span{tableDesc.PrimaryIndexSpan()} + spans := []roachpb.Span{tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec)} details := jobspb.ChangefeedDetails{ Targets: jobspb.ChangefeedTargets{tableDesc.ID: jobspb.ChangefeedTarget{ StatementTimeName: tableDesc.Name, @@ -232,7 +233,8 @@ func createBenchmarkChangefeed( NeedsInitialScan: needsInitialScan, } - rowsFn := kvsToRows(s.LeaseManager().(*sql.LeaseManager), details, buf.Get) + rowsFn := kvsToRows(s.ExecutorConfig().(sql.ExecutorConfig).Codec, + s.LeaseManager().(*sql.LeaseManager), details, buf.Get) sf := span.MakeFrontier(spans...) tickFn := emitEntries(s.ClusterSettings(), details, hlc.Timestamp{}, sf, encoder, sink, rowsFn, TestingKnobs{}, metrics) diff --git a/pkg/ccl/changefeedccl/changefeed.go b/pkg/ccl/changefeedccl/changefeed.go index dd0ccc8df799..3aee16b49d14 100644 --- a/pkg/ccl/changefeedccl/changefeed.go +++ b/pkg/ccl/changefeedccl/changefeed.go @@ -54,12 +54,13 @@ type emitEntry struct { // returns a closure that may be repeatedly called to advance the changefeed. // The returned closure is not threadsafe. 
func kvsToRows( + codec keys.SQLCodec, leaseMgr *sql.LeaseManager, details jobspb.ChangefeedDetails, inputFn func(context.Context) (kvfeed.Event, error), ) func(context.Context) ([]emitEntry, error) { _, withDiff := details.Opts[changefeedbase.OptDiff] - rfCache := newRowFetcherCache(leaseMgr) + rfCache := newRowFetcherCache(codec, leaseMgr) var kvs row.SpanKVFetcher appendEmitEntryForKV := func( diff --git a/pkg/ccl/changefeedccl/changefeed_dist.go b/pkg/ccl/changefeedccl/changefeed_dist.go index fbf8e1a3fbf6..f39594106ab4 100644 --- a/pkg/ccl/changefeedccl/changefeed_dist.go +++ b/pkg/ccl/changefeedccl/changefeed_dist.go @@ -12,6 +12,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" @@ -99,7 +100,7 @@ func distChangefeedFlow( } execCfg := phs.ExecCfg() - trackedSpans, err := fetchSpansForTargets(ctx, execCfg.DB, details.Targets, spansTS) + trackedSpans, err := fetchSpansForTargets(ctx, execCfg.DB, execCfg.Codec, details.Targets, spansTS) if err != nil { return err } @@ -211,7 +212,11 @@ func distChangefeedFlow( } func fetchSpansForTargets( - ctx context.Context, db *kv.DB, targets jobspb.ChangefeedTargets, ts hlc.Timestamp, + ctx context.Context, + db *kv.DB, + codec keys.SQLCodec, + targets jobspb.ChangefeedTargets, + ts hlc.Timestamp, ) ([]roachpb.Span, error) { var spans []roachpb.Span err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -223,7 +228,7 @@ func fetchSpansForTargets( if err != nil { return err } - spans = append(spans, tableDesc.PrimaryIndexSpan()) + spans = append(spans, tableDesc.PrimaryIndexSpan(codec)) } return nil }) diff --git a/pkg/ccl/changefeedccl/changefeed_processors.go b/pkg/ccl/changefeedccl/changefeed_processors.go index f4fe8b8a0b31..e8d768e5c236 100644 --- a/pkg/ccl/changefeedccl/changefeed_processors.go +++ 
b/pkg/ccl/changefeedccl/changefeed_processors.go @@ -205,7 +205,7 @@ func (ca *changeAggregator) Start(ctx context.Context) context.Context { _, withDiff := ca.spec.Feed.Opts[changefeedbase.OptDiff] kvfeedCfg := makeKVFeedCfg(ca.flowCtx.Cfg, leaseMgr, ca.kvFeedMemMon, ca.spec, spans, withDiff, buf, metrics) - rowsFn := kvsToRows(leaseMgr, ca.spec.Feed, buf.Get) + rowsFn := kvsToRows(ca.flowCtx.Codec(), leaseMgr, ca.spec.Feed, buf.Get) ca.tickFn = emitEntries(ca.flowCtx.Cfg.Settings, ca.spec.Feed, kvfeedCfg.InitialHighWater, sf, ca.encoder, ca.sink, rowsFn, knobs, metrics) ca.startKVFeed(ctx, kvfeedCfg) diff --git a/pkg/ccl/changefeedccl/rowfetcher_cache.go b/pkg/ccl/changefeedccl/rowfetcher_cache.go index f108e4ddf98e..d0fb11562598 100644 --- a/pkg/ccl/changefeedccl/rowfetcher_cache.go +++ b/pkg/ccl/changefeedccl/rowfetcher_cache.go @@ -27,14 +27,16 @@ import ( // StartScanFrom can be used to turn that key (or all the keys making up the // column families of one row) into a row. type rowFetcherCache struct { + codec keys.SQLCodec leaseMgr *sql.LeaseManager fetchers map[*sqlbase.ImmutableTableDescriptor]*row.Fetcher a sqlbase.DatumAlloc } -func newRowFetcherCache(leaseMgr *sql.LeaseManager) *rowFetcherCache { +func newRowFetcherCache(codec keys.SQLCodec, leaseMgr *sql.LeaseManager) *rowFetcherCache { return &rowFetcherCache{ + codec: codec, leaseMgr: leaseMgr, fetchers: make(map[*sqlbase.ImmutableTableDescriptor]*row.Fetcher), } @@ -44,7 +46,7 @@ func (c *rowFetcherCache) TableDescForKey( ctx context.Context, key roachpb.Key, ts hlc.Timestamp, ) (*sqlbase.ImmutableTableDescriptor, error) { var tableDesc *sqlbase.ImmutableTableDescriptor - key, err := keys.TODOSQLCodec.StripTenantPrefix(key) + key, err := c.codec.StripTenantPrefix(key) if err != nil { return nil, err } @@ -103,13 +105,14 @@ func (c *rowFetcherCache) RowFetcherForTableDesc( var rf row.Fetcher if err := rf.Init( + c.codec, false, /* reverse */ sqlbase.ScanLockingStrength_FOR_NONE, false, /* 
returnRangeInfo */ false, /* isCheck */ &c.a, row.FetcherTableArgs{ - Spans: tableDesc.AllIndexSpans(), + Spans: tableDesc.AllIndexSpans(c.codec), Desc: tableDesc, Index: &tableDesc.PrimaryIndex, ColIdxMap: colIdxMap, diff --git a/pkg/ccl/importccl/import_stmt.go b/pkg/ccl/importccl/import_stmt.go index cee7eab2420c..7d1309c0e3b2 100644 --- a/pkg/ccl/importccl/import_stmt.go +++ b/pkg/ccl/importccl/import_stmt.go @@ -710,9 +710,10 @@ func importPlanHook( // Prepare the protected timestamp record. var spansToProtect []roachpb.Span + codec := p.(sql.PlanHookState).ExecCfg().Codec for i := range tableDetails { if td := &tableDetails[i]; !td.IsNew { - spansToProtect = append(spansToProtect, td.Desc.TableSpan()) + spansToProtect = append(spansToProtect, td.Desc.TableSpan(codec)) } } if len(spansToProtect) > 0 { @@ -1260,9 +1261,8 @@ func (r *importResumer) OnFailOrCancel(ctx context.Context, phs interface{}) err telemetry.Count("import.total.failed") cfg := phs.(sql.PlanHookState).ExecCfg() - jr := cfg.JobRegistry return cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - if err := r.dropTables(ctx, jr, txn); err != nil { + if err := r.dropTables(ctx, txn, cfg); err != nil { return err } return r.releaseProtectedTimestamp(ctx, txn, cfg.ProtectedTimestampProvider) @@ -1289,7 +1289,9 @@ func (r *importResumer) releaseProtectedTimestamp( } // dropTables implements the OnFailOrCancel logic. -func (r *importResumer) dropTables(ctx context.Context, jr *jobs.Registry, txn *kv.Txn) error { +func (r *importResumer) dropTables( + ctx context.Context, txn *kv.Txn, execCfg *sql.ExecutorConfig, +) error { details := r.job.Details().(jobspb.ImportDetails) // Needed to trigger the schema change manager. 
@@ -1325,7 +1327,7 @@ func (r *importResumer) dropTables(ctx context.Context, jr *jobs.Registry, txn * return errors.Errorf("invalid pre-IMPORT time to rollback") } ts := hlc.Timestamp{WallTime: details.Walltime}.Prev() - if err := sql.RevertTables(ctx, txn.DB(), revert, ts, sql.RevertTableDefaultBatchSize); err != nil { + if err := sql.RevertTables(ctx, txn.DB(), execCfg, revert, ts, sql.RevertTableDefaultBatchSize); err != nil { return errors.Wrap(err, "rolling back partially completed IMPORT") } } @@ -1382,7 +1384,7 @@ func (r *importResumer) dropTables(ctx context.Context, jr *jobs.Registry, txn * Progress: jobspb.SchemaChangeGCProgress{}, NonCancelable: true, } - if _, err := jr.CreateJobWithTxn(ctx, gcJobRecord, txn); err != nil { + if _, err := execCfg.JobRegistry.CreateJobWithTxn(ctx, gcJobRecord, txn); err != nil { return err } diff --git a/pkg/ccl/importccl/load.go b/pkg/ccl/importccl/load.go index d0547cd37f40..745b804141ab 100644 --- a/pkg/ccl/importccl/load.go +++ b/pkg/ccl/importccl/load.go @@ -113,6 +113,7 @@ func Load( evalCtx := &tree.EvalContext{} evalCtx.SetTxnTimestamp(curTime) evalCtx.SetStmtTimestamp(curTime) + evalCtx.Codec = keys.TODOSQLCodec blobClientFactory := blobs.TestBlobServiceClient(writeToDir) conf, err := cloud.ExternalStorageConfFromURI(uri) @@ -223,7 +224,7 @@ func Load( } ri, err = row.MakeInserter( - ctx, nil, tableDesc, tableDesc.Columns, row.SkipFKs, nil /* fkTables */, &sqlbase.DatumAlloc{}, + ctx, nil, evalCtx.Codec, tableDesc, tableDesc.Columns, row.SkipFKs, nil /* fkTables */, &sqlbase.DatumAlloc{}, ) if err != nil { return backupccl.BackupManifest{}, errors.Wrap(err, "make row inserter") diff --git a/pkg/ccl/importccl/read_import_mysql.go b/pkg/ccl/importccl/read_import_mysql.go index 0c5f1c0d62cc..3fe97f98903d 100644 --- a/pkg/ccl/importccl/read_import_mysql.go +++ b/pkg/ccl/importccl/read_import_mysql.go @@ -19,7 +19,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" 
"github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/lex" @@ -332,7 +331,7 @@ func readMysqlCreateTable( if match != "" && !found { return nil, errors.Errorf("table %q not found in file (found tables: %s)", match, strings.Join(names, ", ")) } - if err := addDelayedFKs(ctx, fkDefs, fks.resolver, evalCtx.Settings); err != nil { + if err := addDelayedFKs(ctx, fkDefs, fks.resolver, evalCtx); err != nil { return nil, err } return ret, nil @@ -540,11 +539,11 @@ type delayedFK struct { } func addDelayedFKs( - ctx context.Context, defs []delayedFK, resolver fkResolver, settings *cluster.Settings, + ctx context.Context, defs []delayedFK, resolver fkResolver, evalCtx *tree.EvalContext, ) error { for _, def := range defs { if err := sql.ResolveFK( - ctx, nil, resolver, def.tbl, def.def, map[sqlbase.ID]*sqlbase.MutableTableDescriptor{}, sql.NewTable, tree.ValidationDefault, settings, + ctx, nil, resolver, def.tbl, def.def, map[sqlbase.ID]*sqlbase.MutableTableDescriptor{}, sql.NewTable, tree.ValidationDefault, evalCtx, ); err != nil { return err } diff --git a/pkg/ccl/importccl/read_import_pgdump.go b/pkg/ccl/importccl/read_import_pgdump.go index aad392f7a0e9..680bbed30892 100644 --- a/pkg/ccl/importccl/read_import_pgdump.go +++ b/pkg/ccl/importccl/read_import_pgdump.go @@ -275,7 +275,7 @@ func readPostgresCreateTable( } for _, constraint := range constraints { if err := sql.ResolveFK( - evalCtx.Ctx(), nil /* txn */, fks.resolver, desc, constraint, backrefs, sql.NewTable, tree.ValidationDefault, p.ExecCfg().Settings, + evalCtx.Ctx(), nil /* txn */, fks.resolver, desc, constraint, backrefs, sql.NewTable, tree.ValidationDefault, evalCtx, ); err != nil { return nil, err } diff --git a/pkg/ccl/importccl/testutils_test.go b/pkg/ccl/importccl/testutils_test.go index 1a5495b5cf15..1cad3ae613da 
100644 --- a/pkg/ccl/importccl/testutils_test.go +++ b/pkg/ccl/importccl/testutils_test.go @@ -85,6 +85,7 @@ var testEvalCtx = &tree.EvalContext{ }, StmtTimestamp: timeutil.Unix(100000000, 0), Settings: cluster.MakeTestingClusterSettings(), + Codec: keys.SystemSQLCodec, } // Value generator represents a value of some data at specified row/col. diff --git a/pkg/ccl/partitionccl/drop_test.go b/pkg/ccl/partitionccl/drop_test.go index ddc42babdde0..0ba08e9d1711 100644 --- a/pkg/ccl/partitionccl/drop_test.go +++ b/pkg/ccl/partitionccl/drop_test.go @@ -15,6 +15,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/gcjob" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -71,7 +72,7 @@ func TestDropIndexWithZoneConfigCCL(t *testing.T) { if err != nil { t.Fatal(err) } - indexSpan := tableDesc.IndexSpan(indexDesc.ID) + indexSpan := tableDesc.IndexSpan(keys.SystemSQLCodec, indexDesc.ID) tests.CheckKeyCount(t, kvDB, indexSpan, numRows) // Set zone configs on the primary index, secondary index, and one partition diff --git a/pkg/ccl/partitionccl/partition.go b/pkg/ccl/partitionccl/partition.go index 30affa6fecef..60f684a11f4f 100644 --- a/pkg/ccl/partitionccl/partition.go +++ b/pkg/ccl/partitionccl/partition.go @@ -271,7 +271,7 @@ func selectPartitionExprs( if err := tableDesc.ForeachNonDropIndex(func(idxDesc *sqlbase.IndexDescriptor) error { genExpr := true return selectPartitionExprsByName( - a, tableDesc, idxDesc, &idxDesc.Partitioning, prefixDatums, exprsByPartName, genExpr) + a, evalCtx, tableDesc, idxDesc, &idxDesc.Partitioning, prefixDatums, exprsByPartName, genExpr) }); err != nil { return nil, err } @@ -322,6 +322,7 @@ func selectPartitionExprs( // that the requested partitions are all valid). 
func selectPartitionExprsByName( a *sqlbase.DatumAlloc, + evalCtx *tree.EvalContext, tableDesc *sqlbase.TableDescriptor, idxDesc *sqlbase.IndexDescriptor, partDesc *sqlbase.PartitioningDescriptor, @@ -340,7 +341,7 @@ func selectPartitionExprsByName( exprsByPartName[l.Name] = tree.DBoolFalse var fakeDatums tree.Datums if err := selectPartitionExprsByName( - a, tableDesc, idxDesc, &l.Subpartitioning, fakeDatums, exprsByPartName, genExpr, + a, evalCtx, tableDesc, idxDesc, &l.Subpartitioning, fakeDatums, exprsByPartName, genExpr, ); err != nil { return err } @@ -380,7 +381,7 @@ func selectPartitionExprsByName( for _, l := range partDesc.List { for _, valueEncBuf := range l.Values { t, _, err := sqlbase.DecodePartitionTuple( - a, tableDesc, idxDesc, partDesc, valueEncBuf, prefixDatums) + a, evalCtx.Codec, tableDesc, idxDesc, partDesc, valueEncBuf, prefixDatums) if err != nil { return err } @@ -414,7 +415,7 @@ func selectPartitionExprsByName( genExpr = false } if err := selectPartitionExprsByName( - a, tableDesc, idxDesc, &l.Subpartitioning, allDatums, exprsByPartName, genExpr, + a, evalCtx, tableDesc, idxDesc, &l.Subpartitioning, allDatums, exprsByPartName, genExpr, ); err != nil { return err } diff --git a/pkg/ccl/partitionccl/partition_test.go b/pkg/ccl/partitionccl/partition_test.go index 99d66ce82b14..c73720805bff 100644 --- a/pkg/ccl/partitionccl/partition_test.go +++ b/pkg/ccl/partitionccl/partition_test.go @@ -1252,7 +1252,7 @@ func TestSelectPartitionExprs(t *testing.T) { {`p33p44,p335p445,p33dp44d`, `((a, b) = (3, 3)) OR ((a, b) = (4, 4))`}, } - evalCtx := &tree.EvalContext{} + evalCtx := &tree.EvalContext{Codec: keys.SystemSQLCodec} for _, test := range tests { t.Run(test.partitions, func(t *testing.T) { var partNames tree.NameList @@ -1332,7 +1332,7 @@ func TestRepartitioning(t *testing.T) { repartition.WriteString(`PARTITION BY NOTHING`) } else { if err := sql.ShowCreatePartitioning( - &sqlbase.DatumAlloc{}, test.new.parsed.tableDesc, testIndex, + 
&sqlbase.DatumAlloc{}, keys.SystemSQLCodec, test.new.parsed.tableDesc, testIndex, &testIndex.Partitioning, &repartition, 0 /* indent */, 0, /* colOffset */ ); err != nil { t.Fatalf("%+v", err) diff --git a/pkg/ccl/partitionccl/zone_test.go b/pkg/ccl/partitionccl/zone_test.go index 028eddc0b93b..090249316803 100644 --- a/pkg/ccl/partitionccl/zone_test.go +++ b/pkg/ccl/partitionccl/zone_test.go @@ -289,7 +289,7 @@ func TestGenerateSubzoneSpans(t *testing.T) { clusterID := uuid.MakeV4() hasNewSubzones := false spans, err := sql.GenerateSubzoneSpans( - cluster.NoSettings, clusterID, test.parsed.tableDesc, test.parsed.subzones, hasNewSubzones) + cluster.NoSettings, clusterID, keys.SystemSQLCodec, test.parsed.tableDesc, test.parsed.subzones, hasNewSubzones) if err != nil { t.Fatalf("generating subzone spans: %+v", err) } diff --git a/pkg/ccl/storageccl/bench_test.go b/pkg/ccl/storageccl/bench_test.go index e3914303525a..760bc876f0c9 100644 --- a/pkg/ccl/storageccl/bench_test.go +++ b/pkg/ccl/storageccl/bench_test.go @@ -178,7 +178,7 @@ func BenchmarkImport(b *testing.B) { if tableDesc == nil || tableDesc.ParentID == keys.SystemDatabaseID { b.Fatalf("bad table descriptor: %+v", tableDesc) } - oldStartKey = sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID) + oldStartKey = sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, tableDesc, tableDesc.PrimaryIndex.ID) newDesc := *tableDesc newDesc.ID = id newDescBytes, err := protoutil.Marshal(sqlbase.WrapDescriptor(&newDesc)) diff --git a/pkg/ccl/storageccl/key_rewriter_test.go b/pkg/ccl/storageccl/key_rewriter_test.go index b7385dc62f36..7edbc7eb2a43 100644 --- a/pkg/ccl/storageccl/key_rewriter_test.go +++ b/pkg/ccl/storageccl/key_rewriter_test.go @@ -12,6 +12,7 @@ import ( "bytes" "testing" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -76,7 +77,7 @@ func 
TestKeyRewriter(t *testing.T) { } t.Run("normal", func(t *testing.T) { - key := sqlbase.MakeIndexKeyPrefix(&sqlbase.NamespaceTable, desc.PrimaryIndex.ID) + key := sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, &sqlbase.NamespaceTable, desc.PrimaryIndex.ID) newKey, ok, err := kr.RewriteKey(key, notSpan) if err != nil { t.Fatal(err) @@ -94,7 +95,7 @@ func TestKeyRewriter(t *testing.T) { }) t.Run("prefix end", func(t *testing.T) { - key := roachpb.Key(sqlbase.MakeIndexKeyPrefix(&sqlbase.NamespaceTable, desc.PrimaryIndex.ID)).PrefixEnd() + key := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, &sqlbase.NamespaceTable, desc.PrimaryIndex.ID)).PrefixEnd() newKey, ok, err := kr.RewriteKey(key, notSpan) if err != nil { t.Fatal(err) @@ -123,7 +124,7 @@ func TestKeyRewriter(t *testing.T) { t.Fatal(err) } - key := sqlbase.MakeIndexKeyPrefix(&sqlbase.NamespaceTable, desc.PrimaryIndex.ID) + key := sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, &sqlbase.NamespaceTable, desc.PrimaryIndex.ID) newKey, ok, err := newKr.RewriteKey(key, notSpan) if err != nil { t.Fatal(err) diff --git a/pkg/keys/spans.go b/pkg/keys/spans.go index eeb29fe24487..516538d79efb 100644 --- a/pkg/keys/spans.go +++ b/pkg/keys/spans.go @@ -34,6 +34,9 @@ var ( NodeLivenessSpan = roachpb.Span{Key: NodeLivenessPrefix, EndKey: NodeLivenessKeyMax} // SystemConfigSpan is the range of system objects which will be gossiped. + // + // TODO(nvanbenschoten): references to this span need to be prefixed by + // tenant ID. This is tracked in #48184. SystemConfigSpan = roachpb.Span{Key: SystemConfigSplitKey, EndKey: SystemConfigTableDataMax} // NoSplitSpans describes the ranges that should never be split. 
diff --git a/pkg/kv/kvserver/reports/constraint_stats_report_test.go b/pkg/kv/kvserver/reports/constraint_stats_report_test.go index a80342f8d8ac..016bcc274a7c 100644 --- a/pkg/kv/kvserver/reports/constraint_stats_report_test.go +++ b/pkg/kv/kvserver/reports/constraint_stats_report_test.go @@ -857,8 +857,8 @@ func generateTableZone(t table, tableDesc sqlbase.TableDescriptor) (*zonepb.Zone if tableZone != nil { var err error tableZone.SubzoneSpans, err = sql.GenerateSubzoneSpans( - nil, uuid.UUID{} /* clusterID */, &tableDesc, tableZone.Subzones, - false /* hasNewSubzones */) + nil, uuid.UUID{} /* clusterID */, keys.SystemSQLCodec, + &tableDesc, tableZone.Subzones, false /* hasNewSubzones */) if err != nil { return nil, errors.Wrap(err, "error generating subzone spans") } diff --git a/pkg/server/server.go b/pkg/server/server.go index 4b4e48150d5a..f259ec13affd 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -537,13 +537,14 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { Config: &cfg, // NB: s.cfg has a populated AmbientContext. stopper: stopper, clock: clock, - protectedtsProvider: protectedtsProvider, runtime: runtimeSampler, + tenantID: roachpb.SystemTenantID, db: db, registry: registry, sessionRegistry: sessionRegistry, circularInternalExecutor: internalExecutor, jobRegistry: jobRegistry, + protectedtsProvider: protectedtsProvider, }) if err != nil { return nil, err diff --git a/pkg/server/server_sql.go b/pkg/server/server_sql.go index 98a982595fc6..259eca7bf587 100644 --- a/pkg/server/server_sql.go +++ b/pkg/server/server_sql.go @@ -135,12 +135,13 @@ type sqlServerArgs struct { // other things. clock *hlc.Clock - // The executorConfig uses the provider. - protectedtsProvider protectedts.Provider // DistSQLCfg holds on to this to check for node CPU utilization in // samplerProcessor. runtime execinfra.RuntimeStats + // The tenant that the SQL server runs on behalf of. 
+ tenantID roachpb.TenantID + // SQL uses KV, both for non-DistSQL and DistSQL execution. db *kv.DB @@ -160,10 +161,15 @@ type sqlServerArgs struct { // pointer to an empty struct in this configuration, which newSQLServer // fills. jobRegistry *jobs.Registry + + // The executorConfig uses the provider. + protectedtsProvider protectedts.Provider } func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*sqlServer, error) { execCfg := &sql.ExecutorConfig{} + codec := keys.MakeSQLCodec(cfg.tenantID) + var jobAdoptionStopFile string for _, spec := range cfg.Stores.Specs { if !spec.InMemory && spec.Path != "" { @@ -290,13 +296,14 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*sqlServer, error) { AmbientContext: cfg.AmbientCtx, Settings: cfg.Settings, RuntimeStats: cfg.runtime, + ClusterID: &cfg.rpcContext.ClusterID, + ClusterName: cfg.ClusterName, + NodeID: cfg.nodeIDContainer, + Codec: codec, DB: cfg.db, Executor: cfg.circularInternalExecutor, RPCContext: cfg.rpcContext, Stopper: cfg.stopper, - NodeID: cfg.nodeIDContainer, - ClusterID: &cfg.rpcContext.ClusterID, - ClusterName: cfg.ClusterName, TempStorage: tempEngine, TempStoragePath: cfg.TempStorageConfig.Path, @@ -362,6 +369,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*sqlServer, error) { *execCfg = sql.ExecutorConfig{ Settings: cfg.Settings, NodeInfo: nodeInfo, + Codec: codec, DefaultZoneConfig: &cfg.DefaultZoneConfig, Locality: cfg.Locality, AmbientCtx: cfg.AmbientCtx, diff --git a/pkg/server/settingsworker.go b/pkg/server/settingsworker.go index f3e697c8e697..172e40e0a8ad 100644 --- a/pkg/server/settingsworker.go +++ b/pkg/server/settingsworker.go @@ -31,7 +31,8 @@ func (s *Server) refreshSettings() { tbl := &sqlbase.SettingsTable a := &sqlbase.DatumAlloc{} - settingsTablePrefix := keys.TODOSQLCodec.TablePrefix(uint32(tbl.ID)) + codec := keys.TODOSQLCodec + settingsTablePrefix := codec.TablePrefix(uint32(tbl.ID)) colIdxMap := row.ColIDtoRowIndexFromCols(tbl.Columns) processKV 
:= func(ctx context.Context, kv roachpb.KeyValue, u settings.Updater) error { @@ -44,7 +45,7 @@ func (s *Server) refreshSettings() { { types := []types.T{tbl.Columns[0].Type} nameRow := make([]sqlbase.EncDatum, 1) - _, matches, _, err := sqlbase.DecodeIndexKey(tbl, &tbl.PrimaryIndex, types, nameRow, nil, kv.Key) + _, matches, _, err := sqlbase.DecodeIndexKey(codec, tbl, &tbl.PrimaryIndex, types, nameRow, nil, kv.Key) if err != nil { return errors.Wrap(err, "failed to decode key") } diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index 21904c156b22..af8f2102442e 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -504,13 +504,14 @@ func testSQLServerArgs(ts *TestServer) sqlServerArgs { Config: &cfg, stopper: stopper, clock: clock, - protectedtsProvider: protectedTSProvider, runtime: status.NewRuntimeStatSampler(context.Background(), clock), + tenantID: roachpb.SystemTenantID, db: ts.DB(), registry: registry, sessionRegistry: sql.NewSessionRegistry(), circularInternalExecutor: circularInternalExecutor, jobRegistry: &jobs.Registry{}, + protectedtsProvider: protectedTSProvider, } } diff --git a/pkg/sql/alter_table.go b/pkg/sql/alter_table.go index 316bd6ed7bcf..52676ad52464 100644 --- a/pkg/sql/alter_table.go +++ b/pkg/sql/alter_table.go @@ -202,7 +202,8 @@ func (n *alterTableNode) startExec(params runParams) error { // We're checking to see if a user is trying add a non-nullable column without a default to a // non empty table by scanning the primary index span with a limit of 1 to see if any key exists. 
if !col.Nullable && (col.DefaultExpr == nil && !col.IsComputed()) { - kvs, err := params.p.txn.Scan(params.ctx, n.tableDesc.PrimaryIndexSpan().Key, n.tableDesc.PrimaryIndexSpan().EndKey, 1) + span := n.tableDesc.PrimaryIndexSpan(params.ExecCfg().Codec) + kvs, err := params.p.txn.Scan(params.ctx, span.Key, span.EndKey, 1) if err != nil { return err } @@ -342,7 +343,8 @@ func (n *alterTableNode) startExec(params runParams) error { // Check whether the table is empty, and pass the result to resolveFK(). If // the table is empty, then resolveFK will automatically add the necessary // index for a fk constraint if the index does not exist. - kvs, scanErr := params.p.txn.Scan(params.ctx, n.tableDesc.PrimaryIndexSpan().Key, n.tableDesc.PrimaryIndexSpan().EndKey, 1) + span := n.tableDesc.PrimaryIndexSpan(params.ExecCfg().Codec) + kvs, scanErr := params.p.txn.Scan(params.ctx, span.Key, span.EndKey, 1) if scanErr != nil { err = scanErr return diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 503498f3963e..54ad609439f5 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -119,7 +119,7 @@ func (sc *SchemaChanger) makeFixedTimestampRunner(readAsOf hlc.Timestamp) histor runner := func(ctx context.Context, retryable scTxnFn) error { return sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *kv.Txn) error { // We need to re-create the evalCtx since the txn may retry. 
- evalCtx := createSchemaChangeEvalCtx(ctx, readAsOf, sc.ieFactory) + evalCtx := createSchemaChangeEvalCtx(ctx, sc.execCfg, readAsOf, sc.ieFactory) return retryable(ctx, txn, &evalCtx) }) } @@ -185,7 +185,7 @@ func (sc *SchemaChanger) runBackfill(ctx context.Context) error { needColumnBackfill = true } case *sqlbase.DescriptorMutation_Index: - addedIndexSpans = append(addedIndexSpans, tableDesc.IndexSpan(t.Index.ID)) + addedIndexSpans = append(addedIndexSpans, tableDesc.IndexSpan(sc.execCfg.Codec, t.Index.ID)) case *sqlbase.DescriptorMutation_Constraint: switch t.Constraint.ConstraintType { case sqlbase.ConstraintToUpdate_CHECK: @@ -628,7 +628,15 @@ func (sc *SchemaChanger) truncateIndexes( } rd, err := row.MakeDeleter( - ctx, txn, tableDesc, nil, nil, row.SkipFKs, nil /* *tree.EvalContext */, alloc, + ctx, + txn, + sc.execCfg.Codec, + tableDesc, + nil, + nil, + row.SkipFKs, + nil, /* *tree.EvalContext */ + alloc, ) if err != nil { return err @@ -852,7 +860,7 @@ func (sc *SchemaChanger) distBackfill( return nil } cbw := metadataCallbackWriter{rowResultWriter: &errOnlyResultWriter{}, fn: metaFn} - evalCtx := createSchemaChangeEvalCtx(ctx, txn.ReadTimestamp(), sc.ieFactory) + evalCtx := createSchemaChangeEvalCtx(ctx, sc.execCfg, txn.ReadTimestamp(), sc.ieFactory) recv := MakeDistSQLReceiver( ctx, &cbw, @@ -1060,8 +1068,9 @@ func (sc *SchemaChanger) validateInvertedIndexes( // distributed execution and avoid bypassing the SQL decoding start := timeutil.Now() var idxLen int64 - key := tableDesc.IndexSpan(idx.ID).Key - endKey := tableDesc.IndexSpan(idx.ID).EndKey + span := tableDesc.IndexSpan(sc.execCfg.Codec, idx.ID) + key := span.Key + endKey := span.EndKey if err := runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, _ *extendedEvalContext) error { for { kvs, err := txn.Scan(ctx, key, endKey, 1000000) @@ -1354,7 +1363,7 @@ func runSchemaChangesInTxn( doneColumnBackfill = true case *sqlbase.DescriptorMutation_Index: - if err := indexBackfillInTxn(ctx, 
planner.Txn(), immutDesc, traceKV); err != nil { + if err := indexBackfillInTxn(ctx, planner.Txn(), planner.EvalContext(), immutDesc, traceKV); err != nil { return err } @@ -1416,7 +1425,7 @@ func runSchemaChangesInTxn( case *sqlbase.DescriptorMutation_Index: if err := indexTruncateInTxn( - ctx, planner.Txn(), planner.ExecCfg(), immutDesc, t.Index, traceKV, + ctx, planner.Txn(), planner.ExecCfg(), planner.EvalContext(), immutDesc, t.Index, traceKV, ); err != nil { return err } @@ -1687,7 +1696,7 @@ func columnBackfillInTxn( } otherTableDescs = append(otherTableDescs, t.ImmutableTableDescriptor) } - sp := tableDesc.PrimaryIndexSpan() + sp := tableDesc.PrimaryIndexSpan(evalCtx.Codec) for sp.Key != nil { var err error sp.Key, err = backfiller.RunColumnBackfillChunk(ctx, @@ -1706,13 +1715,17 @@ func columnBackfillInTxn( // It operates entirely on the current goroutine and is thus able to // reuse an existing kv.Txn safely. func indexBackfillInTxn( - ctx context.Context, txn *kv.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, traceKV bool, + ctx context.Context, + txn *kv.Txn, + evalCtx *tree.EvalContext, + tableDesc *sqlbase.ImmutableTableDescriptor, + traceKV bool, ) error { var backfiller backfill.IndexBackfiller - if err := backfiller.Init(tableDesc); err != nil { + if err := backfiller.Init(evalCtx, tableDesc); err != nil { return err } - sp := tableDesc.PrimaryIndexSpan() + sp := tableDesc.PrimaryIndexSpan(evalCtx.Codec) for sp.Key != nil { var err error sp.Key, err = backfiller.RunIndexBackfillChunk(ctx, @@ -1731,6 +1744,7 @@ func indexTruncateInTxn( ctx context.Context, txn *kv.Txn, execCfg *ExecutorConfig, + evalCtx *tree.EvalContext, tableDesc *sqlbase.ImmutableTableDescriptor, idx *sqlbase.IndexDescriptor, traceKV bool, @@ -1739,13 +1753,13 @@ func indexTruncateInTxn( var sp roachpb.Span for done := false; !done; done = sp.Key == nil { rd, err := row.MakeDeleter( - ctx, txn, tableDesc, nil, nil, row.SkipFKs, nil /* *tree.EvalContext */, alloc, + ctx, txn, 
execCfg.Codec, tableDesc, nil, nil, row.SkipFKs, evalCtx, alloc, ) if err != nil { return err } td := tableDeleter{rd: rd, alloc: alloc} - if err := td.init(ctx, txn, nil /* *tree.EvalContext */); err != nil { + if err := td.init(ctx, txn, evalCtx); err != nil { return err } sp, err = td.deleteIndex( diff --git a/pkg/sql/backfill/backfill.go b/pkg/sql/backfill/backfill.go index 6c745b2e6211..eb824dd66f9d 100644 --- a/pkg/sql/backfill/backfill.go +++ b/pkg/sql/backfill/backfill.go @@ -121,6 +121,7 @@ func (cb *ColumnBackfiller) Init( ValNeededForCol: valNeededForCol, } return cb.fetcher.Init( + evalCtx.Codec, false, /* reverse */ sqlbase.ScanLockingStrength_FOR_NONE, false, /* returnRangeInfo */ @@ -175,6 +176,7 @@ func (cb *ColumnBackfiller) RunColumnBackfillChunk( ru, err := row.MakeUpdater( ctx, txn, + cb.evalCtx.Codec, tableDesc, fkTables, cb.updateCols, @@ -299,6 +301,7 @@ type IndexBackfiller struct { types []types.T rowVals tree.Datums + evalCtx *tree.EvalContext } // ContainsInvertedIndex returns true if backfilling an inverted index. @@ -312,7 +315,10 @@ func (ib *IndexBackfiller) ContainsInvertedIndex() bool { } // Init initializes an IndexBackfiller. -func (ib *IndexBackfiller) Init(desc *sqlbase.ImmutableTableDescriptor) error { +func (ib *IndexBackfiller) Init( + evalCtx *tree.EvalContext, desc *sqlbase.ImmutableTableDescriptor, +) error { + ib.evalCtx = evalCtx numCols := len(desc.Columns) cols := desc.Columns if len(desc.Mutations) > 0 { @@ -364,6 +370,7 @@ func (ib *IndexBackfiller) Init(desc *sqlbase.ImmutableTableDescriptor) error { ValNeededForCol: valNeededForCol, } return ib.fetcher.Init( + evalCtx.Codec, false, /* reverse */ sqlbase.ScanLockingStrength_FOR_NONE, false, /* returnRangeInfo */ @@ -426,8 +433,14 @@ func (ib *IndexBackfiller) BuildIndexEntriesChunk( // not want to include empty k/v pairs while backfilling. 
buffer = buffer[:0] if buffer, err = sqlbase.EncodeSecondaryIndexes( - tableDesc.TableDesc(), ib.added, ib.colIdxMap, - ib.rowVals, buffer, false /* includeEmpty */); err != nil { + ib.evalCtx.Codec, + tableDesc.TableDesc(), + ib.added, + ib.colIdxMap, + ib.rowVals, + buffer, + false, /* includeEmpty */ + ); err != nil { return nil, nil, err } entries = append(entries, buffer...) diff --git a/pkg/sql/colexec/cfetcher.go b/pkg/sql/colexec/cfetcher.go index 16236074d33a..9c42f72eee73 100644 --- a/pkg/sql/colexec/cfetcher.go +++ b/pkg/sql/colexec/cfetcher.go @@ -245,6 +245,7 @@ type cFetcher struct { // non-primary index, tables.ValNeededForCol can only refer to columns in the // index. func (rf *cFetcher) Init( + codec keys.SQLCodec, allocator *colmem.Allocator, reverse bool, lockStr sqlbase.ScanLockingStrength, @@ -323,7 +324,7 @@ func (rf *cFetcher) Init( } sort.Ints(table.neededColsList) - table.knownPrefixLength = len(sqlbase.MakeIndexKeyPrefix(table.desc.TableDesc(), table.index.ID)) + table.knownPrefixLength = len(sqlbase.MakeIndexKeyPrefix(codec, table.desc.TableDesc(), table.index.ID)) var indexColumnIDs []sqlbase.ColumnID indexColumnIDs, table.indexColumnDirs = table.index.FullColumnIDs() diff --git a/pkg/sql/colexec/colbatch_scan.go b/pkg/sql/colexec/colbatch_scan.go index c1accb8cb359..48943deb1280 100644 --- a/pkg/sql/colexec/colbatch_scan.go +++ b/pkg/sql/colexec/colbatch_scan.go @@ -14,6 +14,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/col/coldata" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/colexecbase" "github.com/cockroachdb/cockroach/pkg/sql/colexecbase/colexecerror" @@ -128,8 +129,8 @@ func newColBatchScan( columnIdxMap := spec.Table.ColumnIdxMapWithMutations(returnMutations) fetcher := cFetcher{} if _, _, err := initCRowFetcher( - allocator, &fetcher, &spec.Table, int(spec.IndexIdx), columnIdxMap, spec.Reverse, - neededColumns, spec.IsCheck, 
spec.Visibility, spec.LockingStrength, + flowCtx.Codec(), allocator, &fetcher, &spec.Table, int(spec.IndexIdx), columnIdxMap, + spec.Reverse, neededColumns, spec.IsCheck, spec.Visibility, spec.LockingStrength, ); err != nil { return nil, err } @@ -150,6 +151,7 @@ func newColBatchScan( // initCRowFetcher initializes a row.cFetcher. See initRowFetcher. func initCRowFetcher( + codec keys.SQLCodec, allocator *colmem.Allocator, fetcher *cFetcher, desc *sqlbase.TableDescriptor, @@ -180,7 +182,7 @@ func initCRowFetcher( ValNeededForCol: valNeededForCol, } if err := fetcher.Init( - allocator, reverseScan, lockStr, true /* returnRangeInfo */, isCheck, tableArgs, + codec, allocator, reverseScan, lockStr, true /* returnRangeInfo */, isCheck, tableArgs, ); err != nil { return nil, false, err } diff --git a/pkg/sql/colflow/colbatch_scan_test.go b/pkg/sql/colflow/colbatch_scan_test.go index 92e396567626..51cf3778d1ed 100644 --- a/pkg/sql/colflow/colbatch_scan_test.go +++ b/pkg/sql/colflow/colbatch_scan_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/colexec" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -56,7 +57,9 @@ func BenchmarkColBatchScan(b *testing.B) { Core: execinfrapb.ProcessorCoreUnion{ TableReader: &execinfrapb.TableReaderSpec{ Table: *tableDesc, - Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{ + {Span: tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec)}, + }, }}, Post: execinfrapb.PostProcessSpec{ Projection: true, diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index 7d2a8e9374db..3bbe974b962e 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -1972,6 +1972,7 @@ func (ex *connExecutor) initEvalCtx(ctx context.Context, evalCtx *extendedEvalCo ClusterID: ex.server.cfg.ClusterID(), 
ClusterName: ex.server.cfg.RPCContext.ClusterName(), NodeID: ex.server.cfg.NodeID.Get(), + Codec: ex.server.cfg.Codec, Locality: ex.server.cfg.Locality, ReCache: ex.server.reCache, InternalExecutor: &ie, diff --git a/pkg/sql/conn_executor_internal_test.go b/pkg/sql/conn_executor_internal_test.go index e1d320b5e08b..4bbeea2f943d 100644 --- a/pkg/sql/conn_executor_internal_test.go +++ b/pkg/sql/conn_executor_internal_test.go @@ -16,6 +16,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -266,6 +267,7 @@ func startConnExecutor( NodeID: nodeID, ClusterID: func() uuid.UUID { return uuid.UUID{} }, }, + Codec: keys.SystemSQLCodec, DistSQLPlanner: NewDistSQLPlanner( ctx, execinfra.Version, st, roachpb.NodeDescriptor{NodeID: 1}, nil, /* rpcCtx */ diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index db895ae08494..450c7bb3ce41 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -2895,7 +2895,7 @@ func addPartitioningRows( buf.WriteString(`, `) } tuple, _, err := sqlbase.DecodePartitionTuple( - &datumAlloc, table, index, partitioning, values, fakePrefixDatums, + &datumAlloc, p.ExecCfg().Codec, table, index, partitioning, values, fakePrefixDatums, ) if err != nil { return err @@ -2946,7 +2946,7 @@ func addPartitioningRows( for _, r := range partitioning.Range { var buf bytes.Buffer fromTuple, _, err := sqlbase.DecodePartitionTuple( - &datumAlloc, table, index, partitioning, r.FromInclusive, fakePrefixDatums, + &datumAlloc, p.ExecCfg().Codec, table, index, partitioning, r.FromInclusive, fakePrefixDatums, ) if err != nil { return err @@ -2954,7 +2954,7 @@ func addPartitioningRows( buf.WriteString(fromTuple.String()) buf.WriteString(" TO ") toTuple, _, err := sqlbase.DecodePartitionTuple( - &datumAlloc, table, index, partitioning, 
r.ToExclusive, fakePrefixDatums, + &datumAlloc, p.ExecCfg().Codec, table, index, partitioning, r.ToExclusive, fakePrefixDatums, ) if err != nil { return err diff --git a/pkg/sql/create_sequence.go b/pkg/sql/create_sequence.go index 5eb18121bbd1..cf1ec6b46322 100644 --- a/pkg/sql/create_sequence.go +++ b/pkg/sql/create_sequence.go @@ -126,7 +126,7 @@ func doCreateSequence( } // Initialize the sequence value. - seqValueKey := keys.TODOSQLCodec.SequenceKey(uint32(id)) + seqValueKey := params.ExecCfg().Codec.SequenceKey(uint32(id)) b := &kv.Batch{} b.Inc(seqValueKey, desc.SequenceOpts.Start-desc.SequenceOpts.Increment) if err := params.p.txn.Run(params.ctx, b); err != nil { diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index fe9877c44f92..3fc849836523 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -383,6 +383,7 @@ func (n *createTableNode) startExec(params runParams) error { ri, err := row.MakeInserter( params.ctx, params.p.txn, + params.ExecCfg().Codec, sqlbase.NewImmutableTableDescriptor(*desc.TableDesc()), desc.Columns, row.SkipFKs, @@ -497,7 +498,7 @@ func (p *planner) resolveFK( ts FKTableState, validationBehavior tree.ValidationBehavior, ) error { - return ResolveFK(ctx, p.txn, p, tbl, d, backrefs, ts, validationBehavior, p.ExecCfg().Settings) + return ResolveFK(ctx, p.txn, p, tbl, d, backrefs, ts, validationBehavior, p.EvalContext()) } func qualifyFKColErrorWithDB( @@ -607,7 +608,7 @@ func ResolveFK( backrefs map[sqlbase.ID]*sqlbase.MutableTableDescriptor, ts FKTableState, validationBehavior tree.ValidationBehavior, - settings *cluster.Settings, + evalCtx *tree.EvalContext, ) error { originColumnIDs := make(sqlbase.ColumnIDs, len(d.FromCols)) for i, col := range d.FromCols { @@ -1655,7 +1656,9 @@ func MakeTableDesc( desc.Checks = append(desc.Checks, ck) case *tree.ForeignKeyConstraintTableDef: - if err := ResolveFK(ctx, txn, fkResolver, &desc, d, affected, NewTable, tree.ValidationDefault, st); err != nil { + if err := 
ResolveFK( + ctx, txn, fkResolver, &desc, d, affected, NewTable, tree.ValidationDefault, evalCtx, + ); err != nil { return desc, err } diff --git a/pkg/sql/delete_range.go b/pkg/sql/delete_range.go index f00594e32652..87dfc5aba03e 100644 --- a/pkg/sql/delete_range.go +++ b/pkg/sql/delete_range.go @@ -184,6 +184,7 @@ func (d *deleteRangeNode) startExec(params runParams) error { } } if err := d.fetcher.Init( + params.ExecCfg().Codec, false, /* reverse */ // TODO(nvanbenschoten): it might make sense to use a FOR_UPDATE locking // strength here. Consider hooking this in to the same knob that will diff --git a/pkg/sql/distsql/server.go b/pkg/sql/distsql/server.go index b8cc3af47bdc..5f5cf027ee92 100644 --- a/pkg/sql/distsql/server.go +++ b/pkg/sql/distsql/server.go @@ -297,6 +297,7 @@ func (ds *ServerImpl) setupFlow( ClusterID: ds.ServerConfig.ClusterID.Get(), ClusterName: ds.ServerConfig.ClusterName, NodeID: nodeID, + Codec: ds.ServerConfig.Codec, ReCache: ds.regexpCache, Mon: &monitor, // Most processors will override this Context with their own context in diff --git a/pkg/sql/distsql_plan_csv.go b/pkg/sql/distsql_plan_csv.go index b06e6b9a339e..c842211cc4f1 100644 --- a/pkg/sql/distsql_plan_csv.go +++ b/pkg/sql/distsql_plan_csv.go @@ -188,7 +188,7 @@ func presplitTableBoundaries( ) error { expirationTime := cfg.DB.Clock().Now().Add(time.Hour.Nanoseconds(), 0) for _, tbl := range tables { - for _, span := range tbl.Desc.AllIndexSpans() { + for _, span := range tbl.Desc.AllIndexSpans(cfg.Codec) { if err := cfg.DB.AdminSplit(ctx, span.Key, span.Key, expirationTime); err != nil { return err } diff --git a/pkg/sql/distsql_plan_stats.go b/pkg/sql/distsql_plan_stats.go index a71562df0f99..227d74af2fb5 100644 --- a/pkg/sql/distsql_plan_stats.go +++ b/pkg/sql/distsql_plan_stats.go @@ -82,7 +82,7 @@ func (dsp *DistSQLPlanner) createStatsPlan( if err != nil { return PhysicalPlan{}, err } - sb := span.MakeBuilder(desc.TableDesc(), scan.index) + sb := 
span.MakeBuilder(planCtx.planner.ExecCfg().Codec, desc.TableDesc(), scan.index) scan.spans, err = sb.UnconstrainedSpans() if err != nil { return PhysicalPlan{}, err diff --git a/pkg/sql/drop_index.go b/pkg/sql/drop_index.go index ffbd008ad3f2..cbaa2641091e 100644 --- a/pkg/sql/drop_index.go +++ b/pkg/sql/drop_index.go @@ -263,7 +263,13 @@ func (p *planner) dropIndexByName( for _, s := range zone.Subzones { if s.IndexID != uint32(idx.ID) { _, err = GenerateSubzoneSpans( - p.ExecCfg().Settings, p.ExecCfg().ClusterID(), tableDesc.TableDesc(), zone.Subzones, false /* newSubzones */) + p.ExecCfg().Settings, + p.ExecCfg().ClusterID(), + p.ExecCfg().Codec, + tableDesc.TableDesc(), + zone.Subzones, + false, /* newSubzones */ + ) if sqlbase.IsCCLRequiredError(err) { return sqlbase.NewCCLRequiredError(fmt.Errorf("schema change requires a CCL binary "+ "because table %q has at least one remaining index or partition with a zone config", @@ -434,7 +440,7 @@ func (p *planner) dropIndexByName( if idxEntry.ID == idx.ID { // Unsplit all manually split ranges in the index so they can be // automatically merged by the merge queue. - span := tableDesc.IndexSpan(idxEntry.ID) + span := tableDesc.IndexSpan(p.ExecCfg().Codec, idxEntry.ID) ranges, err := ScanMetaKVs(ctx, p.txn, span) if err != nil { return err diff --git a/pkg/sql/drop_table.go b/pkg/sql/drop_table.go index 609b329ab6d6..cff27025628d 100644 --- a/pkg/sql/drop_table.go +++ b/pkg/sql/drop_table.go @@ -362,7 +362,7 @@ func (p *planner) initiateDropTable( // Unsplit all manually split ranges in the table so they can be // automatically merged by the merge queue. 
- ranges, err := ScanMetaKVs(ctx, p.txn, tableDesc.TableSpan()) + ranges, err := ScanMetaKVs(ctx, p.txn, tableDesc.TableSpan(p.ExecCfg().Codec)) if err != nil { return err } diff --git a/pkg/sql/drop_test.go b/pkg/sql/drop_test.go index d3776bad248b..4630940b8b31 100644 --- a/pkg/sql/drop_test.go +++ b/pkg/sql/drop_test.go @@ -187,7 +187,7 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); t.Fatal(err) } - tableSpan := tbDesc.TableSpan() + tableSpan := tbDesc.TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, 6) if _, err := sqlDB.Exec(`DROP DATABASE t RESTRICT`); !testutils.IsError(err, @@ -360,8 +360,8 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); } tb2Desc := desc.Table(ts) - tableSpan := tbDesc.TableSpan() - table2Span := tb2Desc.TableSpan() + tableSpan := tbDesc.TableSpan(keys.SystemSQLCodec) + table2Span := tb2Desc.TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, 6) tests.CheckKeyCount(t, kvDB, table2Span, 6) @@ -538,12 +538,12 @@ func TestDropIndex(t *testing.T) { t.Fatal(err) } tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") - tests.CheckKeyCount(t, kvDB, tableDesc.TableSpan(), 3*numRows) + tests.CheckKeyCount(t, kvDB, tableDesc.TableSpan(keys.SystemSQLCodec), 3*numRows) idx, _, err := tableDesc.FindIndexByName("foo") if err != nil { t.Fatal(err) } - indexSpan := tableDesc.IndexSpan(idx.ID) + indexSpan := tableDesc.IndexSpan(keys.SystemSQLCodec, idx.ID) tests.CheckKeyCount(t, kvDB, indexSpan, numRows) if _, err := sqlDB.Exec(`DROP INDEX t.kv@foo`); err != nil { t.Fatal(err) @@ -555,7 +555,7 @@ func TestDropIndex(t *testing.T) { } // Index data hasn't been deleted. tests.CheckKeyCount(t, kvDB, indexSpan, numRows) - tests.CheckKeyCount(t, kvDB, tableDesc.TableSpan(), 3*numRows) + tests.CheckKeyCount(t, kvDB, tableDesc.TableSpan(keys.SystemSQLCodec), 3*numRows) // TODO (lucy): Maybe this test API should use an offset starting // from the most recent job instead. 
@@ -580,9 +580,9 @@ func TestDropIndex(t *testing.T) { if err != nil { t.Fatal(err) } - newIdxSpan := tableDesc.IndexSpan(newIdx.ID) + newIdxSpan := tableDesc.IndexSpan(keys.SystemSQLCodec, newIdx.ID) tests.CheckKeyCount(t, kvDB, newIdxSpan, numRows) - tests.CheckKeyCount(t, kvDB, tableDesc.TableSpan(), 4*numRows) + tests.CheckKeyCount(t, kvDB, tableDesc.TableSpan(keys.SystemSQLCodec), 4*numRows) clearIndexAttempt = true // Add a zone config for the table. @@ -616,7 +616,7 @@ func TestDropIndex(t *testing.T) { tests.CheckKeyCount(t, kvDB, newIdxSpan, numRows) tests.CheckKeyCount(t, kvDB, indexSpan, 0) - tests.CheckKeyCount(t, kvDB, tableDesc.TableSpan(), 3*numRows) + tests.CheckKeyCount(t, kvDB, tableDesc.TableSpan(keys.SystemSQLCodec), 3*numRows) } func TestDropIndexWithZoneConfigOSS(t *testing.T) { @@ -642,7 +642,7 @@ func TestDropIndexWithZoneConfigOSS(t *testing.T) { if err != nil { t.Fatal(err) } - indexSpan := tableDesc.IndexSpan(indexDesc.ID) + indexSpan := tableDesc.IndexSpan(keys.SystemSQLCodec, indexDesc.ID) tests.CheckKeyCount(t, kvDB, indexSpan, numRows) // Hack in zone configs for the primary and secondary indexes. 
(You need a CCL @@ -698,7 +698,7 @@ func TestDropIndexInterleaved(t *testing.T) { tests.CreateKVInterleavedTable(t, sqlDB, numRows) tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") - tableSpan := tableDesc.TableSpan() + tableSpan := tableDesc.TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, 3*numRows) @@ -750,7 +750,7 @@ func TestDropTable(t *testing.T) { t.Fatal(err) } - tableSpan := tableDesc.TableSpan() + tableSpan := tableDesc.TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, 3*numRows) if _, err := sqlDB.Exec(`DROP TABLE t.kv`); err != nil { t.Fatal(err) @@ -837,7 +837,7 @@ func TestDropTableDeleteData(t *testing.T) { t.Fatalf("Name entry %q does not exist", nameKey) } - tableSpan := descs[i].TableSpan() + tableSpan := descs[i].TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, numKeys) if _, err := sqlDB.Exec(fmt.Sprintf(`DROP TABLE t.%s`, tableName)); err != nil { @@ -855,7 +855,7 @@ func TestDropTableDeleteData(t *testing.T) { if err := descExists(sqlDB, true, descs[i].ID); err != nil { t.Fatal(err) } - tableSpan := descs[i].TableSpan() + tableSpan := descs[i].TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, numKeys) if err := jobutils.VerifySystemJob(t, sqlRun, 2*i+1+migrationJobOffset, jobspb.TypeSchemaChange, jobs.StatusSucceeded, jobs.Record{ @@ -884,7 +884,7 @@ func TestDropTableDeleteData(t *testing.T) { return zoneExists(sqlDB, nil, descs[i].ID) }) - tableSpan := descs[i].TableSpan() + tableSpan := descs[i].TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, 0) // Ensure that the job is marked as succeeded. @@ -923,7 +923,7 @@ func TestDropTableDeleteData(t *testing.T) { checkTableGCed(i) } else { // Data still present for tables past barrier. 
- tableSpan := descs[i].TableSpan() + tableSpan := descs[i].TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, numKeys) } } @@ -985,7 +985,7 @@ func TestDropTableWhileUpgradingFormat(t *testing.T) { t.Fatal(err) } - tableSpan := tableDesc.TableSpan() + tableSpan := tableDesc.TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, numRows) sqlDB.Exec(t, `DROP TABLE test.t`) @@ -1035,7 +1035,7 @@ func TestDropTableInterleavedDeleteData(t *testing.T) { tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") tableDescInterleaved := sqlbase.GetTableDescriptor(kvDB, "t", "intlv") - tableSpan := tableDesc.TableSpan() + tableSpan := tableDesc.TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, 3*numRows) if _, err := sqlDB.Exec(`DROP TABLE t.intlv`); err != nil { diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index 7e244dc9da4b..b9cf1eef6f96 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -29,6 +29,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" @@ -586,6 +587,7 @@ type nodeStatusGenerator interface { type ExecutorConfig struct { Settings *cluster.Settings NodeInfo + Codec keys.SQLCodec DefaultZoneConfig *zonepb.ZoneConfig Locality roachpb.Locality AmbientCtx log.AmbientContext diff --git a/pkg/sql/execinfra/flow_context.go b/pkg/sql/execinfra/flow_context.go index 07984895dcaa..32780d6c88a9 100644 --- a/pkg/sql/execinfra/flow_context.go +++ b/pkg/sql/execinfra/flow_context.go @@ -13,6 +13,7 @@ package execinfra import ( + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" 
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -81,3 +82,8 @@ func (ctx *FlowCtx) TestingKnobs() TestingKnobs { func (ctx *FlowCtx) Stopper() *stop.Stopper { return ctx.Cfg.Stopper } + +// Codec returns the SQL codec for this flowCtx. +func (ctx *FlowCtx) Codec() keys.SQLCodec { + return ctx.EvalCtx.Codec +} diff --git a/pkg/sql/execinfra/server_config.go b/pkg/sql/execinfra/server_config.go index 789a8e064c7c..1556c184e254 100644 --- a/pkg/sql/execinfra/server_config.go +++ b/pkg/sql/execinfra/server_config.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/diskmap" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" @@ -83,6 +84,15 @@ type ServerConfig struct { Settings *cluster.Settings RuntimeStats RuntimeStats + ClusterID *base.ClusterIDContainer + ClusterName string + + // NodeID is the id of the node on which this Server is running. + NodeID *base.NodeIDContainer + + // Codec is capable of encoding and decoding sql table keys. + Codec keys.SQLCodec + // DB is a handle to the cluster. DB *kv.DB // Executor can be used to run "internal queries". Note that Flows also have @@ -124,11 +134,6 @@ type ServerConfig struct { Metrics *DistSQLMetrics - // NodeID is the id of the node on which this Server is running. - NodeID *base.NodeIDContainer - ClusterID *base.ClusterIDContainer - ClusterName string - // JobRegistry manages jobs being used by this Server. 
JobRegistry *jobs.Registry diff --git a/pkg/sql/flowinfra/cluster_test.go b/pkg/sql/flowinfra/cluster_test.go index 444ca79a0039..e217bb9caf52 100644 --- a/pkg/sql/flowinfra/cluster_test.go +++ b/pkg/sql/flowinfra/cluster_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" @@ -67,7 +68,7 @@ func TestClusterFlow(t *testing.T) { desc := sqlbase.GetTableDescriptor(kvDB, "test", "t") makeIndexSpan := func(start, end int) execinfrapb.TableReaderSpan { var span roachpb.Span - prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(desc, desc.Indexes[0].ID)) + prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, desc, desc.Indexes[0].ID)) span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...) span.EndKey = append(span.EndKey, prefix...) span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...) 
diff --git a/pkg/sql/flowinfra/server_test.go b/pkg/sql/flowinfra/server_test.go index 3946357ec6fd..30086dacf5c6 100644 --- a/pkg/sql/flowinfra/server_test.go +++ b/pkg/sql/flowinfra/server_test.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -53,7 +54,7 @@ func TestServer(t *testing.T) { Table: *td, IndexIdx: 0, Reverse: false, - Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan(keys.SystemSQLCodec)}}, } post := execinfrapb.PostProcessSpec{ Filter: execinfrapb.Expression{Expr: "@1 != 2"}, // a != 2 diff --git a/pkg/sql/gcjob/index_garbage_collection.go b/pkg/sql/gcjob/index_garbage_collection.go index 33565eaaa22b..6284fdafc137 100644 --- a/pkg/sql/gcjob/index_garbage_collection.go +++ b/pkg/sql/gcjob/index_garbage_collection.go @@ -52,7 +52,7 @@ func gcIndexes( } indexDesc := sqlbase.IndexDescriptor{ID: index.IndexID} - if err := clearIndex(ctx, execCfg.DB, parentTable, indexDesc); err != nil { + if err := clearIndex(ctx, execCfg, parentTable, indexDesc); err != nil { return false, errors.Wrapf(err, "clearing index %d", indexDesc.ID) } @@ -76,14 +76,17 @@ func gcIndexes( // clearIndexes issues Clear Range requests over all specified indexes. 
func clearIndex( - ctx context.Context, db *kv.DB, tableDesc *sqlbase.TableDescriptor, index sqlbase.IndexDescriptor, + ctx context.Context, + execCfg *sql.ExecutorConfig, + tableDesc *sqlbase.TableDescriptor, + index sqlbase.IndexDescriptor, ) error { log.Infof(ctx, "clearing index %d from table %d", index.ID, tableDesc.ID) if index.IsInterleaved() { return errors.Errorf("unexpected interleaved index %d", index.ID) } - sp := tableDesc.IndexSpan(index.ID) + sp := tableDesc.IndexSpan(execCfg.Codec, index.ID) // ClearRange cannot be run in a transaction, so create a // non-transactional batch to send the request. @@ -94,7 +97,7 @@ func clearIndex( EndKey: sp.EndKey, }, }) - return db.Run(ctx, b) + return execCfg.DB.Run(ctx, b) } // completeDroppedIndexes updates the mutations of the table descriptor to diff --git a/pkg/sql/gcjob/refresh_statuses.go b/pkg/sql/gcjob/refresh_statuses.go index 10ad836c0c93..528a49301aed 100644 --- a/pkg/sql/gcjob/refresh_statuses.go +++ b/pkg/sql/gcjob/refresh_statuses.go @@ -104,7 +104,7 @@ func updateStatusForGCElements( // Update the status of the table if the table was dropped. if table.Dropped() { - deadline := updateTableStatus(ctx, int64(tableTTL), protectedtsCache, table, tableDropTimes, progress) + deadline := updateTableStatus(ctx, execCfg, int64(tableTTL), protectedtsCache, table, tableDropTimes, progress) if timeutil.Until(deadline) < 0 { expired = true } else if deadline.Before(earliestDeadline) { @@ -113,7 +113,7 @@ func updateStatusForGCElements( } // Update the status of any indexes waiting for GC. - indexesExpired, deadline := updateIndexesStatus(ctx, tableTTL, table, protectedtsCache, placeholder, indexDropTimes, progress) + indexesExpired, deadline := updateIndexesStatus(ctx, execCfg, tableTTL, table, protectedtsCache, placeholder, indexDropTimes, progress) if indexesExpired { expired = true } @@ -134,6 +134,7 @@ func updateStatusForGCElements( // expired. 
func updateTableStatus( ctx context.Context, + execCfg *sql.ExecutorConfig, ttlSeconds int64, protectedtsCache protectedts.Cache, table *sqlbase.TableDescriptor, @@ -141,7 +142,7 @@ func updateTableStatus( progress *jobspb.SchemaChangeGCProgress, ) time.Time { deadline := timeutil.Unix(0, int64(math.MaxInt64)) - sp := table.TableSpan() + sp := table.TableSpan(execCfg.Codec) for i, t := range progress.Tables { droppedTable := &progress.Tables[i] @@ -179,6 +180,7 @@ func updateTableStatus( // index should be GC'd, if any, otherwise MaxInt. func updateIndexesStatus( ctx context.Context, + execCfg *sql.ExecutorConfig, tableTTL int32, table *sqlbase.TableDescriptor, protectedtsCache protectedts.Cache, @@ -194,7 +196,7 @@ func updateIndexesStatus( continue } - sp := table.IndexSpan(idxProgress.IndexID) + sp := table.IndexSpan(execCfg.Codec, idxProgress.IndexID) ttlSeconds := getIndexTTL(tableTTL, placeholder, idxProgress.IndexID) diff --git a/pkg/sql/gcjob/table_garbage_collection.go b/pkg/sql/gcjob/table_garbage_collection.go index a4204df47389..01384196f67a 100644 --- a/pkg/sql/gcjob/table_garbage_collection.go +++ b/pkg/sql/gcjob/table_garbage_collection.go @@ -57,7 +57,7 @@ func gcTables( } // First, delete all the table data. - if err := clearTableData(ctx, execCfg.DB, execCfg.DistSender, table); err != nil { + if err := clearTableData(ctx, execCfg.DB, execCfg.DistSender, execCfg.Codec, table); err != nil { return false, errors.Wrapf(err, "clearing data for table %d", table.ID) } @@ -75,7 +75,11 @@ func gcTables( // clearTableData deletes all of the data in the specified table. 
func clearTableData( - ctx context.Context, db *kv.DB, distSender *kvcoord.DistSender, table *sqlbase.TableDescriptor, + ctx context.Context, + db *kv.DB, + distSender *kvcoord.DistSender, + codec keys.SQLCodec, + table *sqlbase.TableDescriptor, ) error { // If DropTime isn't set, assume this drop request is from a version // 1.1 server and invoke legacy code that uses DeleteRange and range GC. @@ -84,11 +88,11 @@ func clearTableData( // cleaned up. if table.DropTime == 0 || table.IsInterleaved() { log.Infof(ctx, "clearing data in chunks for table %d", table.ID) - return sql.ClearTableDataInChunks(ctx, table, db, false /* traceKV */) + return sql.ClearTableDataInChunks(ctx, db, codec, table, false /* traceKV */) } log.Infof(ctx, "clearing data for table %d", table.ID) - tableKey := roachpb.RKey(keys.TODOSQLCodec.TablePrefix(uint32(table.ID))) + tableKey := roachpb.RKey(codec.TablePrefix(uint32(table.ID))) tableSpan := roachpb.RSpan{Key: tableKey, EndKey: tableKey.PrefixEnd()} // ClearRange requests lays down RocksDB range deletion tombstones that have diff --git a/pkg/sql/insert_fast_path.go b/pkg/sql/insert_fast_path.go index 63ac64218ea2..34d4a3a6812a 100644 --- a/pkg/sql/insert_fast_path.go +++ b/pkg/sql/insert_fast_path.go @@ -97,12 +97,14 @@ type insertFastPathFKCheck struct { spanBuilder *span.Builder } -func (c *insertFastPathFKCheck) init() error { +func (c *insertFastPathFKCheck) init(params runParams) error { idx := c.ReferencedIndex.(*optIndex) c.tabDesc = c.ReferencedTable.(*optTable).desc c.idxDesc = idx.desc - c.keyPrefix = sqlbase.MakeIndexKeyPrefix(&c.tabDesc.TableDescriptor, c.idxDesc.ID) - c.spanBuilder = span.MakeBuilder(c.tabDesc.TableDesc(), c.idxDesc) + + codec := params.ExecCfg().Codec + c.keyPrefix = sqlbase.MakeIndexKeyPrefix(codec, &c.tabDesc.TableDescriptor, c.idxDesc.ID) + c.spanBuilder = span.MakeBuilder(codec, c.tabDesc.TableDesc(), c.idxDesc) if len(c.InsertCols) > idx.numLaxKeyCols { return errors.AssertionFailedf( @@ -238,7 +240,7 
@@ func (n *insertFastPathNode) startExec(params runParams) error { if len(n.run.fkChecks) > 0 { for i := range n.run.fkChecks { - if err := n.run.fkChecks[i].init(); err != nil { + if err := n.run.fkChecks[i].init(params); err != nil { return err } } diff --git a/pkg/sql/join_test.go b/pkg/sql/join_test.go index 17ce9ed0f23a..9dbcf9bf829b 100644 --- a/pkg/sql/join_test.go +++ b/pkg/sql/join_test.go @@ -13,6 +13,7 @@ package sql import ( "fmt" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/span" @@ -45,7 +46,7 @@ func newTestScanNode(kvDB *kv.DB, tableName string) (*scanNode, error) { } } scan.reqOrdering = ordering - sb := span.MakeBuilder(desc.TableDesc(), &desc.PrimaryIndex) + sb := span.MakeBuilder(keys.SystemSQLCodec, desc.TableDesc(), &desc.PrimaryIndex) scan.spans, err = sb.SpansFromConstraint(nil /* constraint */, exec.TableColumnOrdinalSet{}, false /* forDelete */) if err != nil { return nil, err diff --git a/pkg/sql/lease_test.go b/pkg/sql/lease_test.go index fe7e15684919..f43c81480419 100644 --- a/pkg/sql/lease_test.go +++ b/pkg/sql/lease_test.go @@ -1086,7 +1086,7 @@ INSERT INTO t.kv VALUES ('a', 'b'); t.Fatal(err) } - tableSpan := tableDesc.TableSpan() + tableSpan := tableDesc.TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, 4) // Allow async schema change waiting for GC to complete (when dropping an diff --git a/pkg/sql/opt_catalog.go b/pkg/sql/opt_catalog.go index 1aebabd3a026..6999f7a6ee1c 100644 --- a/pkg/sql/opt_catalog.go +++ b/pkg/sql/opt_catalog.go @@ -369,7 +369,7 @@ func (oc *optCatalog) dataSourceForTable( return ds, nil } - ds, err := newOptTable(desc, tableStats, zoneConfig) + ds, err := newOptTable(desc, oc.planner.ExecCfg().Codec, tableStats, zoneConfig) if err != nil { return nil, err } @@ -508,6 +508,9 @@ type optTable struct { // indexes. 
indexes []optIndex + // codec is capable of encoding sql table keys. + codec keys.SQLCodec + // rawStats stores the original table statistics slice. Used for a fast-path // check that the statistics haven't changed. rawStats []*stats.TableStatistic @@ -539,10 +542,14 @@ type optTable struct { var _ cat.Table = &optTable{} func newOptTable( - desc *sqlbase.ImmutableTableDescriptor, stats []*stats.TableStatistic, tblZone *zonepb.ZoneConfig, + desc *sqlbase.ImmutableTableDescriptor, + codec keys.SQLCodec, + stats []*stats.TableStatistic, + tblZone *zonepb.ZoneConfig, ) (*optTable, error) { ot := &optTable{ desc: desc, + codec: codec, rawStats: stats, zone: tblZone, } @@ -983,7 +990,7 @@ func (oi *optIndex) Span() roachpb.Span { if desc.ID <= keys.MaxSystemConfigDescID { return keys.SystemConfigSpan } - return desc.IndexSpan(oi.desc.ID) + return desc.IndexSpan(oi.tab.codec, oi.desc.ID) } // Table is part of the cat.Index interface. @@ -1007,7 +1014,7 @@ func (oi *optIndex) PartitionByListPrefixes() []tree.Datums { for i := range list { for _, valueEncBuf := range list[i].Values { t, _, err := sqlbase.DecodePartitionTuple( - &a, &oi.tab.desc.TableDescriptor, oi.desc, &oi.desc.Partitioning, + &a, oi.tab.codec, &oi.tab.desc.TableDescriptor, oi.desc, &oi.desc.Partitioning, valueEncBuf, nil, /* prefixDatums */ ) if err != nil { diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index f7417b8e6237..c0d25a1d9e29 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -90,7 +90,7 @@ func (ef *execFactory) ConstructScan( scan := ef.planner.Scan() colCfg := makeScanColumnsConfig(table, needed) - sb := span.MakeBuilder(tabDesc.TableDesc(), indexDesc) + sb := span.MakeBuilder(ef.planner.ExecCfg().Codec, tabDesc.TableDesc(), indexDesc) // initTable checks that the current user has the correct privilege to access // the table. 
However, the privilege has already been checked in optbuilder, @@ -120,7 +120,9 @@ func (ef *execFactory) ConstructScan( if err != nil { return nil, err } - scan.isFull = len(scan.spans) == 1 && scan.spans[0].EqualValue(scan.desc.IndexSpan(scan.index.ID)) + scan.isFull = len(scan.spans) == 1 && scan.spans[0].EqualValue( + scan.desc.IndexSpan(ef.planner.ExecCfg().Codec, scan.index.ID), + ) for i := range reqOrdering { if reqOrdering[i].ColIdx >= len(colCfg.wantedColumns) { return nil, errors.Errorf("invalid reqOrdering: %v", reqOrdering) @@ -1227,7 +1229,7 @@ func (ef *execFactory) ConstructInsert( } // Create the table inserter, which does the bulk of the work. ri, err := row.MakeInserter( - ctx, ef.planner.txn, tabDesc, colDescs, checkFKs, fkTables, &ef.planner.alloc, + ctx, ef.planner.txn, ef.planner.ExecCfg().Codec, tabDesc, colDescs, checkFKs, fkTables, &ef.planner.alloc, ) if err != nil { return nil, err @@ -1292,7 +1294,7 @@ func (ef *execFactory) ConstructInsertFastPath( // Create the table inserter, which does the bulk of the work. ri, err := row.MakeInserter( - ctx, ef.planner.txn, tabDesc, colDescs, row.SkipFKs, nil /* fkTables */, &ef.planner.alloc, + ctx, ef.planner.txn, ef.planner.ExecCfg().Codec, tabDesc, colDescs, row.SkipFKs, nil /* fkTables */, &ef.planner.alloc, ) if err != nil { return nil, err @@ -1396,6 +1398,7 @@ func (ef *execFactory) ConstructUpdate( ru, err := row.MakeUpdater( ctx, ef.planner.txn, + ef.planner.ExecCfg().Codec, tabDesc, fkTables, updateColDescs, @@ -1542,7 +1545,14 @@ func (ef *execFactory) ConstructUpsert( // Create the table inserter, which does the bulk of the insert-related work. 
ri, err := row.MakeInserter( - ctx, ef.planner.txn, tabDesc, insertColDescs, checkFKs, fkTables, &ef.planner.alloc, + ctx, + ef.planner.txn, + ef.planner.ExecCfg().Codec, + tabDesc, + insertColDescs, + checkFKs, + fkTables, + &ef.planner.alloc, ) if err != nil { return nil, err @@ -1552,6 +1562,7 @@ func (ef *execFactory) ConstructUpsert( ru, err := row.MakeUpdater( ctx, ef.planner.txn, + ef.planner.ExecCfg().Codec, tabDesc, fkTables, updateColDescs, @@ -1669,6 +1680,7 @@ func (ef *execFactory) ConstructDelete( rd, err := row.MakeDeleter( ctx, ef.planner.txn, + ef.planner.ExecCfg().Codec, tabDesc, fkTables, fetchColDescs, @@ -1729,7 +1741,7 @@ func (ef *execFactory) ConstructDeleteRange( ) (exec.Node, error) { tabDesc := table.(*optTable).desc indexDesc := &tabDesc.PrimaryIndex - sb := span.MakeBuilder(tabDesc.TableDesc(), indexDesc) + sb := span.MakeBuilder(ef.planner.ExecCfg().Codec, tabDesc.TableDesc(), indexDesc) if err := ef.planner.maybeSetSystemConfig(tabDesc.GetID()); err != nil { return nil, err diff --git a/pkg/sql/partition_utils.go b/pkg/sql/partition_utils.go index aee63ca704b4..72c10e44cde7 100644 --- a/pkg/sql/partition_utils.go +++ b/pkg/sql/partition_utils.go @@ -70,6 +70,7 @@ import ( func GenerateSubzoneSpans( st *cluster.Settings, clusterID uuid.UUID, + codec keys.SQLCodec, tableDesc *sqlbase.TableDescriptor, subzones []zonepb.Subzone, hasNewSubzones bool, @@ -100,7 +101,7 @@ func GenerateSubzoneSpans( if err := tableDesc.ForeachNonDropIndex(func(idxDesc *sqlbase.IndexDescriptor) error { _, indexSubzoneExists := subzoneIndexByIndexID[idxDesc.ID] if indexSubzoneExists { - idxSpan := tableDesc.IndexSpan(idxDesc.ID) + idxSpan := tableDesc.IndexSpan(codec, idxDesc.ID) // Each index starts with a unique prefix, so (from a precedence // perspective) it's safe to append them all together. 
indexCovering = append(indexCovering, covering.Range{ @@ -111,7 +112,7 @@ func GenerateSubzoneSpans( var emptyPrefix []tree.Datum indexPartitionCoverings, err := indexCoveringsForPartitioning( - a, tableDesc, idxDesc, &idxDesc.Partitioning, subzoneIndexByPartition, emptyPrefix) + a, codec, tableDesc, idxDesc, &idxDesc.Partitioning, subzoneIndexByPartition, emptyPrefix) if err != nil { return err } @@ -169,6 +170,7 @@ func GenerateSubzoneSpans( // `zonepb.Subzone` with the PartitionName set. func indexCoveringsForPartitioning( a *sqlbase.DatumAlloc, + codec keys.SQLCodec, tableDesc *sqlbase.TableDescriptor, idxDesc *sqlbase.IndexDescriptor, partDesc *sqlbase.PartitioningDescriptor, @@ -194,7 +196,7 @@ func indexCoveringsForPartitioning( for _, p := range partDesc.List { for _, valueEncBuf := range p.Values { t, keyPrefix, err := sqlbase.DecodePartitionTuple( - a, tableDesc, idxDesc, partDesc, valueEncBuf, prefixDatums) + a, codec, tableDesc, idxDesc, partDesc, valueEncBuf, prefixDatums) if err != nil { return nil, err } @@ -206,7 +208,7 @@ func indexCoveringsForPartitioning( } newPrefixDatums := append(prefixDatums, t.Datums...) 
subpartitionCoverings, err := indexCoveringsForPartitioning( - a, tableDesc, idxDesc, &p.Subpartitioning, relevantPartitions, newPrefixDatums) + a, codec, tableDesc, idxDesc, &p.Subpartitioning, relevantPartitions, newPrefixDatums) if err != nil { return nil, err } @@ -226,12 +228,12 @@ func indexCoveringsForPartitioning( continue } _, fromKey, err := sqlbase.DecodePartitionTuple( - a, tableDesc, idxDesc, partDesc, p.FromInclusive, prefixDatums) + a, codec, tableDesc, idxDesc, partDesc, p.FromInclusive, prefixDatums) if err != nil { return nil, err } _, toKey, err := sqlbase.DecodePartitionTuple( - a, tableDesc, idxDesc, partDesc, p.ToExclusive, prefixDatums) + a, codec, tableDesc, idxDesc, partDesc, p.ToExclusive, prefixDatums) if err != nil { return nil, err } diff --git a/pkg/sql/pgwire/testdata/pgtest/notice b/pkg/sql/pgwire/testdata/pgtest/notice index b28dd6d1dce7..bc5f6754f4a8 100644 --- a/pkg/sql/pgwire/testdata/pgtest/notice +++ b/pkg/sql/pgwire/testdata/pgtest/notice @@ -29,7 +29,7 @@ ReadyForQuery ---- {"Type":"ParseComplete"} {"Type":"BindComplete"} -{"Severity":"NOTICE","Code":"00000","Message":"the data for dropped indexes is reclaimed asynchronously","Detail":"","Hint":"The reclamation delay can be customized in the zone configuration for the table.","Position":0,"InternalPosition":0,"InternalQuery":"","Where":"","SchemaName":"","TableName":"","ColumnName":"","DataTypeName":"","ConstraintName":"","File":"drop_index.go","Line":490,"Routine":"dropIndexByName","UnknownFields":null} +{"Severity":"NOTICE","Code":"00000","Message":"the data for dropped indexes is reclaimed asynchronously","Detail":"","Hint":"The reclamation delay can be customized in the zone configuration for the table.","Position":0,"InternalPosition":0,"InternalQuery":"","Where":"","SchemaName":"","TableName":"","ColumnName":"","DataTypeName":"","ConstraintName":"","File":"drop_index.go","Line":496,"Routine":"dropIndexByName","UnknownFields":null} 
{"Type":"CommandComplete","CommandTag":"DROP INDEX"} {"Type":"ReadyForQuery","TxStatus":"I"} diff --git a/pkg/sql/physicalplan/fake_span_resolver_test.go b/pkg/sql/physicalplan/fake_span_resolver_test.go index 2d8f96e4a652..60ff3182e5bd 100644 --- a/pkg/sql/physicalplan/fake_span_resolver_test.go +++ b/pkg/sql/physicalplan/fake_span_resolver_test.go @@ -56,7 +56,7 @@ func TestFakeSpanResolver(t *testing.T) { tableDesc := sqlbase.GetTableDescriptor(db, "test", "t") primIdxValDirs := sqlbase.IndexKeyValDirs(&tableDesc.PrimaryIndex) - span := tableDesc.PrimaryIndexSpan() + span := tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec) // Make sure we see all the nodes. It will not always happen (due to // randomness) but it should happen most of the time. diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index 926a6cfe94d2..48a841626de3 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -341,6 +341,7 @@ func internalExtendedEvalCtx( TxnReadOnly: false, TxnImplicit: true, Settings: execCfg.Settings, + Codec: execCfg.Codec, Context: ctx, Mon: plannerMon, TestingKnobs: evalContextTestingKnobs, diff --git a/pkg/sql/relocate.go b/pkg/sql/relocate.go index fd86b68297f2..6833d69f13d1 100644 --- a/pkg/sql/relocate.go +++ b/pkg/sql/relocate.go @@ -112,7 +112,7 @@ func (n *relocateNode) Next(params runParams) (bool, error) { // TODO(a-robinson): Get the lastRangeStartKey via the ReturnRangeInfo option // on the BatchRequest Header. We can't do this until v2.2 because admin // requests don't respect the option on versions earlier than v2.1. 
- rowKey, err := getRowKey(n.tableDesc, n.index, data[1:]) + rowKey, err := getRowKey(params.ExecCfg().Codec, n.tableDesc, n.index, data[1:]) if err != nil { return false, err } diff --git a/pkg/sql/revert.go b/pkg/sql/revert.go index 83031d44445a..b2d52ecb6cb7 100644 --- a/pkg/sql/revert.go +++ b/pkg/sql/revert.go @@ -31,6 +31,7 @@ const RevertTableDefaultBatchSize = 500000 func RevertTables( ctx context.Context, db *kv.DB, + execCfg *ExecutorConfig, tables []*sqlbase.TableDescriptor, targetTime hlc.Timestamp, batchSize int64, @@ -64,7 +65,7 @@ func RevertTables( } } } - spans = append(spans, tables[i].TableSpan()) + spans = append(spans, tables[i].TableSpan(execCfg.Codec)) } for i := range tables { diff --git a/pkg/sql/revert_test.go b/pkg/sql/revert_test.go index aa7a2e5f6742..59b9f2feabaa 100644 --- a/pkg/sql/revert_test.go +++ b/pkg/sql/revert_test.go @@ -31,6 +31,7 @@ func TestRevertTable(t *testing.T) { s, sqlDB, kv := serverutils.StartServer( t, base.TestServerArgs{UseDatabase: "test"}) defer s.Stopper().Stop(context.TODO()) + execCfg := s.ExecutorConfig().(ExecutorConfig) db := sqlutils.MakeSQLRunner(sqlDB) db.Exec(t, `CREATE DATABASE IF NOT EXISTS test`) @@ -68,7 +69,7 @@ func TestRevertTable(t *testing.T) { // Revert the table to ts. desc := sqlbase.GetTableDescriptor(kv, "test", "test") desc.State = sqlbase.TableDescriptor_OFFLINE // bypass the offline check. 
- require.NoError(t, RevertTables(context.TODO(), kv, []*sqlbase.TableDescriptor{desc}, targetTime, 10)) + require.NoError(t, RevertTables(context.TODO(), kv, &execCfg, []*sqlbase.TableDescriptor{desc}, targetTime, 10)) var reverted int db.QueryRow(t, `SELECT xor_agg(k # rev) FROM test`).Scan(&reverted) @@ -97,14 +98,14 @@ func TestRevertTable(t *testing.T) { child := sqlbase.GetTableDescriptor(kv, "test", "child") child.State = sqlbase.TableDescriptor_OFFLINE t.Run("reject only parent", func(t *testing.T) { - require.Error(t, RevertTables(ctx, kv, []*sqlbase.TableDescriptor{desc}, targetTime, 10)) + require.Error(t, RevertTables(ctx, kv, &execCfg, []*sqlbase.TableDescriptor{desc}, targetTime, 10)) }) t.Run("reject only child", func(t *testing.T) { - require.Error(t, RevertTables(ctx, kv, []*sqlbase.TableDescriptor{child}, targetTime, 10)) + require.Error(t, RevertTables(ctx, kv, &execCfg, []*sqlbase.TableDescriptor{child}, targetTime, 10)) }) t.Run("rollback parent and child", func(t *testing.T) { - require.NoError(t, RevertTables(ctx, kv, []*sqlbase.TableDescriptor{desc, child}, targetTime, RevertTableDefaultBatchSize)) + require.NoError(t, RevertTables(ctx, kv, &execCfg, []*sqlbase.TableDescriptor{desc, child}, targetTime, RevertTableDefaultBatchSize)) var reverted, revertedChild int db.QueryRow(t, `SELECT xor_agg(k # rev) FROM test`).Scan(&reverted) diff --git a/pkg/sql/row/cascader.go b/pkg/sql/row/cascader.go index 095ab8f0db66..c5dab07f715d 100644 --- a/pkg/sql/row/cascader.go +++ b/pkg/sql/row/cascader.go @@ -13,6 +13,7 @@ package row import ( "context" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/parser" @@ -290,6 +291,7 @@ func spanForIndexValues( // the request to the referencing table. 
func batchRequestForIndexValues( ctx context.Context, + codec keys.SQLCodec, referencedIndex *sqlbase.IndexDescriptor, referencingTable *sqlbase.ImmutableTableDescriptor, referencingIndex *sqlbase.IndexDescriptor, @@ -299,7 +301,7 @@ func batchRequestForIndexValues( ) (roachpb.BatchRequest, map[sqlbase.ColumnID]int, error) { //TODO(bram): consider caching some of these values - keyPrefix := sqlbase.MakeIndexKeyPrefix(referencingTable.TableDesc(), referencingIndex.ID) + keyPrefix := sqlbase.MakeIndexKeyPrefix(codec, referencingTable.TableDesc(), referencingIndex.ID) prefixLen := len(referencingIndex.ColumnIDs) if len(referencedIndex.ColumnIDs) < prefixLen { prefixLen = len(referencedIndex.ColumnIDs) @@ -343,6 +345,7 @@ func batchRequestForIndexValues( // spanForPKValues creates a span against the primary index of a table and is // used to fetch rows for cascading. func spanForPKValues( + codec keys.SQLCodec, table *sqlbase.ImmutableTableDescriptor, fetchColIDtoRowIndex map[sqlbase.ColumnID]int, values tree.Datums, @@ -354,7 +357,7 @@ func spanForPKValues( sqlbase.ForeignKeyReference_SIMPLE, /* primary key lookup can always use MATCH SIMPLE */ fetchColIDtoRowIndex, values, - sqlbase.MakeIndexKeyPrefix(table.TableDesc(), table.PrimaryIndex.ID), + sqlbase.MakeIndexKeyPrefix(codec, table.TableDesc(), table.PrimaryIndex.ID), ) } @@ -362,6 +365,7 @@ func spanForPKValues( // a table and is used to fetch rows for cascading. 
func batchRequestForPKValues( ctx context.Context, + codec keys.SQLCodec, table *sqlbase.ImmutableTableDescriptor, fetchColIDtoRowIndex map[sqlbase.ColumnID]int, values *rowcontainer.RowContainer, @@ -369,7 +373,7 @@ func batchRequestForPKValues( ) (roachpb.BatchRequest, error) { var req roachpb.BatchRequest for i := 0; i < values.Len(); i++ { - span, err := spanForPKValues(table, fetchColIDtoRowIndex, values.At(i)) + span, err := spanForPKValues(codec, table, fetchColIDtoRowIndex, values.At(i)) if err != nil { return roachpb.BatchRequest{}, err } @@ -414,6 +418,7 @@ func (c *cascader) addIndexPKRowFetcher( isSecondary := table.PrimaryIndex.ID != index.ID var rowFetcher Fetcher if err := rowFetcher.Init( + c.evalCtx.Codec, false, /* reverse */ sqlbase.ScanLockingStrength_FOR_NONE, false, /* returnRangeInfo */ @@ -455,6 +460,7 @@ func (c *cascader) addRowDeleter( rowDeleter, err := makeRowDeleterWithoutCascader( ctx, c.txn, + c.evalCtx.Codec, table, c.fkTables, nil, /* requestedCol */ @@ -479,6 +485,7 @@ func (c *cascader) addRowDeleter( } var rowFetcher Fetcher if err := rowFetcher.Init( + c.evalCtx.Codec, false, /* reverse */ // TODO(nvanbenschoten): it might make sense to use a FOR_UPDATE locking // strength here. Consider hooking this in to the same knob that will @@ -519,6 +526,7 @@ func (c *cascader) addRowUpdater( rowUpdater, err := makeUpdaterWithoutCascader( ctx, c.txn, + c.evalCtx.Codec, table, c.fkTables, table.Columns, @@ -545,6 +553,7 @@ func (c *cascader) addRowUpdater( } var rowFetcher Fetcher if err := rowFetcher.Init( + c.evalCtx.Codec, false, /* reverse */ // TODO(nvanbenschoten): it might make sense to use a FOR_UPDATE locking // strength here. 
Consider hooking this in to the same knob that will @@ -585,7 +594,7 @@ func (c *cascader) deleteRows( ) } req, _, err := batchRequestForIndexValues( - ctx, referencedIndex, referencingTable, referencingIndex, match, values, traceKV, + ctx, c.evalCtx.Codec, referencedIndex, referencingTable, referencingIndex, match, values, traceKV, ) if err != nil { return nil, nil, 0, err @@ -649,7 +658,7 @@ func (c *cascader) deleteRows( // Create a batch request to get all the spans of the primary keys that need // to be deleted. pkLookupReq, err := batchRequestForPKValues( - ctx, referencingTable, indexPKRowFetcherColIDToRowIndex, primaryKeysToDelete, traceKV, + ctx, c.evalCtx.Codec, referencingTable, indexPKRowFetcherColIDToRowIndex, primaryKeysToDelete, traceKV, ) if err != nil { return nil, nil, 0, err @@ -836,7 +845,7 @@ func (c *cascader) updateRows( // Extract a single value to update at a time. req, valueColIDtoRowIndex, err := batchRequestForIndexValues( - ctx, referencedIndex, referencingTable, referencingIndex, match, cascadeQueueElement{ + ctx, c.evalCtx.Codec, referencedIndex, referencingTable, referencingIndex, match, cascadeQueueElement{ startIndex: i, endIndex: i + 1, originalValues: values.originalValues, @@ -902,7 +911,7 @@ func (c *cascader) updateRows( // Create a batch request to get all the spans of the primary keys that need // to be updated. 
pkLookupReq, err := batchRequestForPKValues( - ctx, referencingTable, indexPKRowFetcherColIDToRowIndex, primaryKeysToUpdate, traceKV, + ctx, c.evalCtx.Codec, referencingTable, indexPKRowFetcherColIDToRowIndex, primaryKeysToUpdate, traceKV, ) if err != nil { return nil, nil, nil, 0, err diff --git a/pkg/sql/row/deleter.go b/pkg/sql/row/deleter.go index 401e897bef0d..3c3fa914dd6f 100644 --- a/pkg/sql/row/deleter.go +++ b/pkg/sql/row/deleter.go @@ -40,6 +40,7 @@ type Deleter struct { func MakeDeleter( ctx context.Context, txn *kv.Txn, + codec keys.SQLCodec, tableDesc *sqlbase.ImmutableTableDescriptor, fkTables FkTableMetadata, requestedCols []sqlbase.ColumnDescriptor, @@ -48,7 +49,7 @@ func MakeDeleter( alloc *sqlbase.DatumAlloc, ) (Deleter, error) { rowDeleter, err := makeRowDeleterWithoutCascader( - ctx, txn, tableDesc, fkTables, requestedCols, checkFKs, alloc, + ctx, txn, codec, tableDesc, fkTables, requestedCols, checkFKs, alloc, ) if err != nil { return Deleter{}, err @@ -90,6 +91,7 @@ func MakeDeleter( func makeRowDeleterWithoutCascader( ctx context.Context, txn *kv.Txn, + codec keys.SQLCodec, tableDesc *sqlbase.ImmutableTableDescriptor, fkTables FkTableMetadata, requestedCols []sqlbase.ColumnDescriptor, @@ -132,13 +134,13 @@ func makeRowDeleterWithoutCascader( } rd := Deleter{ - Helper: newRowHelper(tableDesc, indexes), + Helper: newRowHelper(codec, tableDesc, indexes), FetchCols: fetchCols, FetchColIDtoRowIndex: fetchColIDtoRowIndex, } if checkFKs == CheckFKs { var err error - if rd.Fks, err = makeFkExistenceCheckHelperForDelete(ctx, txn, tableDesc, fkTables, + if rd.Fks, err = makeFkExistenceCheckHelperForDelete(ctx, txn, codec, tableDesc, fkTables, fetchColIDtoRowIndex, alloc); err != nil { return Deleter{}, err } @@ -159,7 +161,13 @@ func (rd *Deleter) DeleteRow( for i := range rd.Helper.Indexes { // We want to include empty k/v pairs because we want to delete all k/v's for this row. 
entries, err := sqlbase.EncodeSecondaryIndex( - rd.Helper.TableDesc.TableDesc(), &rd.Helper.Indexes[i], rd.FetchColIDtoRowIndex, values, true /* includeEmpty */) + rd.Helper.Codec, + rd.Helper.TableDesc.TableDesc(), + &rd.Helper.Indexes[i], + rd.FetchColIDtoRowIndex, + values, + true, /* includeEmpty */ + ) if err != nil { return err } @@ -232,7 +240,13 @@ // to true, we will get a k/v pair for each family in the row, // which will guarantee that we delete all the k/v's in this row. secondaryIndexEntry, err := sqlbase.EncodeSecondaryIndex( - rd.Helper.TableDesc.TableDesc(), idx, rd.FetchColIDtoRowIndex, values, true /* includeEmpty */) + rd.Helper.Codec, + rd.Helper.TableDesc.TableDesc(), + idx, + rd.FetchColIDtoRowIndex, + values, + true, /* includeEmpty */ + ) if err != nil { return err } diff --git a/pkg/sql/row/errors.go b/pkg/sql/row/errors.go index df74e531462e..2457cab00a3a 100644 --- a/pkg/sql/row/errors.go +++ b/pkg/sql/row/errors.go @@ -14,6 +14,7 @@ import ( "context" "strings" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -73,10 +74,15 @@ func NewUniquenessConstraintViolationError( key roachpb.Key, value *roachpb.Value, ) error { - // TODO(dan): There's too much internal knowledge of the sql table - // encoding here (and this callsite is the only reason - // DecodeIndexKeyPrefix is exported). Refactor this bit out. - indexID, _, err := sqlbase.DecodeIndexKeyPrefix(tableDesc.TableDesc(), key) + // Strip the tenant prefix and pretend to use the system tenant's SQL codec for + // the rest of this function. This is safe because the key is just used to + // decode the corresponding datums and never escapes this function.
+ codec := keys.SystemSQLCodec + key, _, err := keys.DecodeTenantPrefix(key) + if err != nil { + return err + } + indexID, _, err := sqlbase.DecodeIndexKeyPrefix(codec, tableDesc.TableDesc(), key) if err != nil { return err } @@ -109,6 +115,7 @@ func NewUniquenessConstraintViolationError( ValNeededForCol: valNeededForCol, } if err := rf.Init( + codec, false, /* reverse */ sqlbase.ScanLockingStrength_FOR_NONE, false, /* returnRangeInfo */ diff --git a/pkg/sql/row/fetcher.go b/pkg/sql/row/fetcher.go index a0a807b6f217..b289c7afbb04 100644 --- a/pkg/sql/row/fetcher.go +++ b/pkg/sql/row/fetcher.go @@ -161,6 +161,9 @@ type FetcherTableArgs struct { // // Process res.row // } type Fetcher struct { + // codec is used to encode and decode SQL keys. + codec keys.SQLCodec + // tables is a slice of all the tables and their descriptors for which // rows are returned. tables []tableInfo @@ -245,6 +248,7 @@ func (rf *Fetcher) Reset() { // non-primary index, tables.ValNeededForCol can only refer to columns in the // index. func (rf *Fetcher) Init( + codec keys.SQLCodec, reverse bool, lockStr sqlbase.ScanLockingStrength, returnRangeInfo bool, @@ -256,6 +260,7 @@ func (rf *Fetcher) Init( return errors.AssertionFailedf("no tables to fetch from") } + rf.codec = codec rf.reverse = reverse rf.lockStr = lockStr rf.returnRangeInfo = returnRangeInfo @@ -336,7 +341,9 @@ func (rf *Fetcher) Init( } } - table.knownPrefixLength = len(sqlbase.MakeIndexKeyPrefix(table.desc.TableDesc(), table.index.ID)) + table.knownPrefixLength = len( + sqlbase.MakeIndexKeyPrefix(codec, table.desc.TableDesc(), table.index.ID), + ) var indexColumnIDs []sqlbase.ColumnID indexColumnIDs, table.indexColumnDirs = table.index.FullColumnIDs() @@ -1354,7 +1361,7 @@ func (rf *Fetcher) checkSecondaryIndexDatumEncodings(ctx context.Context) error // The below code makes incorrect checks (#45256). 
indexEntries, err := sqlbase.EncodeSecondaryIndex( - table.desc.TableDesc(), table.index, table.colIdxMap, values, false /* includeEmpty */) + rf.codec, table.desc.TableDesc(), table.index, table.colIdxMap, values, false /* includeEmpty */) if err != nil { return err } diff --git a/pkg/sql/row/fetcher_mvcc_test.go b/pkg/sql/row/fetcher_mvcc_test.go index 7106e383aa42..cd4c307fede1 100644 --- a/pkg/sql/row/fetcher_mvcc_test.go +++ b/pkg/sql/row/fetcher_mvcc_test.go @@ -95,7 +95,7 @@ func TestRowFetcherMVCCMetadata(t *testing.T) { valNeededForCol.Add(colIdx) } args = append(args, row.FetcherTableArgs{ - Spans: desc.AllIndexSpans(), + Spans: desc.AllIndexSpans(keys.SystemSQLCodec), Desc: desc, Index: &desc.PrimaryIndex, ColIdxMap: colIdxMap, @@ -106,6 +106,7 @@ func TestRowFetcherMVCCMetadata(t *testing.T) { } var rf row.Fetcher if err := rf.Init( + keys.SystemSQLCodec, false, /* reverse */ sqlbase.ScanLockingStrength_FOR_NONE, false, /* returnRangeInfo */ diff --git a/pkg/sql/row/fetcher_test.go b/pkg/sql/row/fetcher_test.go index 877d695192c6..d78f7a0b2aa2 100644 --- a/pkg/sql/row/fetcher_test.go +++ b/pkg/sql/row/fetcher_test.go @@ -18,6 +18,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -69,9 +70,11 @@ func initFetcher( ) (fetcher *Fetcher, err error) { fetcher = &Fetcher{} + fetcherCodec := keys.SystemSQLCodec fetcherArgs := makeFetcherArgs(entries) if err := fetcher.Init( + fetcherCodec, reverseScan, sqlbase.ScanLockingStrength_FOR_NONE, false, /* returnRangeInfo */ @@ -166,7 +169,7 @@ func TestNextRowSingle(t *testing.T) { if err := rf.StartScan( context.TODO(), kv.NewTxn(ctx, kvDB, 0), - roachpb.Spans{tableDesc.IndexSpan(tableDesc.PrimaryIndex.ID)}, + roachpb.Spans{tableDesc.IndexSpan(keys.SystemSQLCodec, tableDesc.PrimaryIndex.ID)}, false, 
/*limitBatches*/ 0, /*limitHint*/ false, /*traceKV*/ @@ -286,7 +289,7 @@ func TestNextRowBatchLimiting(t *testing.T) { if err := rf.StartScan( context.TODO(), kv.NewTxn(ctx, kvDB, 0), - roachpb.Spans{tableDesc.IndexSpan(tableDesc.PrimaryIndex.ID)}, + roachpb.Spans{tableDesc.IndexSpan(keys.SystemSQLCodec, tableDesc.PrimaryIndex.ID)}, true, /*limitBatches*/ 10, /*limitHint*/ false, /*traceKV*/ @@ -406,7 +409,7 @@ INDEX(c) // We'll make the first span go to some random key in the middle of the // key space (by appending a number to the index's start key) and the // second span go from that key to the end of the index. - indexSpan := tableDesc.IndexSpan(tableDesc.PrimaryIndex.ID) + indexSpan := tableDesc.IndexSpan(keys.SystemSQLCodec, tableDesc.PrimaryIndex.ID) endKey := indexSpan.EndKey midKey := encoding.EncodeUvarintAscending(indexSpan.Key, uint64(100)) indexSpan.EndKey = midKey @@ -578,7 +581,7 @@ func TestNextRowSecondaryIndex(t *testing.T) { if err := rf.StartScan( context.TODO(), kv.NewTxn(ctx, kvDB, 0), - roachpb.Spans{tableDesc.IndexSpan(tableDesc.Indexes[0].ID)}, + roachpb.Spans{tableDesc.IndexSpan(keys.SystemSQLCodec, tableDesc.Indexes[0].ID)}, false, /*limitBatches*/ 0, /*limitHint*/ false, /*traceKV*/ @@ -920,7 +923,7 @@ func TestNextRowInterleaved(t *testing.T) { // We take every entry's index span (primary or // secondary) and use it to start our scan. 
- lookupSpans[i] = tableDesc.IndexSpan(indexID) + lookupSpans[i] = tableDesc.IndexSpan(keys.SystemSQLCodec, indexID) args[i] = initFetcherArgs{ tableDesc: tableDesc, @@ -1059,7 +1062,7 @@ func TestRowFetcherReset(t *testing.T) { fetcherArgs := makeFetcherArgs(args) if err := resetFetcher.Init( - false /*reverse*/, 0 /* todo */, false /* returnRangeInfo */, false /* isCheck */, &da, fetcherArgs..., + keys.SystemSQLCodec, false /*reverse*/, 0 /* todo */, false /* returnRangeInfo */, false /* isCheck */, &da, fetcherArgs..., ); err != nil { t.Fatal(err) } diff --git a/pkg/sql/row/fk_existence_base.go b/pkg/sql/row/fk_existence_base.go index 4c3119b46889..584909c80e96 100644 --- a/pkg/sql/row/fk_existence_base.go +++ b/pkg/sql/row/fk_existence_base.go @@ -13,6 +13,7 @@ package row import ( "sort" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -118,6 +119,7 @@ type fkExistenceCheckBaseHelper struct { // sql, sqlbase, row. The proliferation is annoying. 
func makeFkExistenceCheckBaseHelper( txn *kv.Txn, + codec keys.SQLCodec, otherTables FkTableMetadata, ref *sqlbase.ForeignKeyConstraint, searchIdx *sqlbase.IndexDescriptor, @@ -147,6 +149,7 @@ func makeFkExistenceCheckBaseHelper( } rf := &Fetcher{} if err := rf.Init( + codec, false, /* reverse */ sqlbase.ScanLockingStrength_FOR_NONE, false, /* returnRangeInfo */ @@ -168,7 +171,7 @@ func makeFkExistenceCheckBaseHelper( ids: ids, prefixLen: len(ref.OriginColumnIDs), valuesScratch: make(tree.Datums, len(ref.OriginColumnIDs)), - spanBuilder: span.MakeBuilder(searchTable.TableDesc(), searchIdx), + spanBuilder: span.MakeBuilder(codec, searchTable.TableDesc(), searchIdx), }, nil } diff --git a/pkg/sql/row/fk_existence_delete.go b/pkg/sql/row/fk_existence_delete.go index a8acc6a409d8..beb0b4e22e65 100644 --- a/pkg/sql/row/fk_existence_delete.go +++ b/pkg/sql/row/fk_existence_delete.go @@ -13,6 +13,7 @@ package row import ( "context" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -36,6 +37,7 @@ type fkExistenceCheckForDelete struct { func makeFkExistenceCheckHelperForDelete( ctx context.Context, txn *kv.Txn, + codec keys.SQLCodec, table *sqlbase.ImmutableTableDescriptor, otherTables FkTableMetadata, colMap map[sqlbase.ColumnID]int, @@ -83,7 +85,7 @@ func makeFkExistenceCheckHelperForDelete( return fkExistenceCheckForDelete{}, errors.NewAssertionErrorWithWrappedErrf( err, "failed to find a suitable index on table %d for deletion", ref.ReferencedTableID) } - fk, err := makeFkExistenceCheckBaseHelper(txn, otherTables, fakeRef, searchIdx, mutatedIdx, colMap, alloc, + fk, err := makeFkExistenceCheckBaseHelper(txn, codec, otherTables, fakeRef, searchIdx, mutatedIdx, colMap, alloc, CheckDeletes) if err == errSkipUnusedFK { continue diff --git a/pkg/sql/row/fk_existence_insert.go b/pkg/sql/row/fk_existence_insert.go index 
e784ea6920fa..3ce6bd663335 100644 --- a/pkg/sql/row/fk_existence_insert.go +++ b/pkg/sql/row/fk_existence_insert.go @@ -13,6 +13,7 @@ package row import ( "context" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -45,6 +46,7 @@ type fkExistenceCheckForInsert struct { func makeFkExistenceCheckHelperForInsert( ctx context.Context, txn *kv.Txn, + codec keys.SQLCodec, table *sqlbase.ImmutableTableDescriptor, otherTables FkTableMetadata, colMap map[sqlbase.ColumnID]int, @@ -75,7 +77,7 @@ func makeFkExistenceCheckHelperForInsert( return h, errors.NewAssertionErrorWithWrappedErrf(err, "failed to find suitable search index for fk %q", ref.Name) } - fk, err := makeFkExistenceCheckBaseHelper(txn, otherTables, ref, searchIdx, mutatedIdx, colMap, alloc, CheckInserts) + fk, err := makeFkExistenceCheckBaseHelper(txn, codec, otherTables, ref, searchIdx, mutatedIdx, colMap, alloc, CheckInserts) if err == errSkipUnusedFK { continue } diff --git a/pkg/sql/row/fk_existence_update.go b/pkg/sql/row/fk_existence_update.go index 0aba15b8fe4e..b0b5187e844c 100644 --- a/pkg/sql/row/fk_existence_update.go +++ b/pkg/sql/row/fk_existence_update.go @@ -13,6 +13,7 @@ package row import ( "context" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -64,6 +65,7 @@ type fkExistenceCheckForUpdate struct { func makeFkExistenceCheckHelperForUpdate( ctx context.Context, txn *kv.Txn, + codec keys.SQLCodec, table *sqlbase.ImmutableTableDescriptor, otherTables FkTableMetadata, updateCols []sqlbase.ColumnDescriptor, @@ -76,13 +78,13 @@ func makeFkExistenceCheckHelperForUpdate( // Instantiate a helper for the referencing tables. 
var err error - if ret.inbound, err = makeFkExistenceCheckHelperForDelete(ctx, txn, table, otherTables, colMap, + if ret.inbound, err = makeFkExistenceCheckHelperForDelete(ctx, txn, codec, table, otherTables, colMap, alloc); err != nil { return ret, err } // Instantiate a helper for the referenced table(s). - ret.outbound, err = makeFkExistenceCheckHelperForInsert(ctx, txn, table, otherTables, colMap, alloc) + ret.outbound, err = makeFkExistenceCheckHelperForInsert(ctx, txn, codec, table, otherTables, colMap, alloc) ret.outbound.checker = ret.inbound.checker // We need *some* KV batch checker to perform the checks. It doesn't diff --git a/pkg/sql/row/helper.go b/pkg/sql/row/helper.go index a29ff00f91af..0b764b25439a 100644 --- a/pkg/sql/row/helper.go +++ b/pkg/sql/row/helper.go @@ -13,6 +13,7 @@ package row import ( "sort" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -20,6 +21,8 @@ import ( // rowHelper has the common methods for table row manipulations. type rowHelper struct { + Codec keys.SQLCodec + TableDesc *sqlbase.ImmutableTableDescriptor // Secondary indexes. Indexes []sqlbase.IndexDescriptor @@ -36,9 +39,9 @@ type rowHelper struct { } func newRowHelper( - desc *sqlbase.ImmutableTableDescriptor, indexes []sqlbase.IndexDescriptor, + codec keys.SQLCodec, desc *sqlbase.ImmutableTableDescriptor, indexes []sqlbase.IndexDescriptor, ) rowHelper { - rh := rowHelper{TableDesc: desc, Indexes: indexes} + rh := rowHelper{Codec: codec, TableDesc: desc, Indexes: indexes} // Pre-compute the encoding directions of the index key values for // pretty-printing in traces. 
@@ -75,7 +78,7 @@ func (rh *rowHelper) encodePrimaryIndex( colIDtoRowIndex map[sqlbase.ColumnID]int, values []tree.Datum, ) (primaryIndexKey []byte, err error) { if rh.primaryIndexKeyPrefix == nil { - rh.primaryIndexKeyPrefix = sqlbase.MakeIndexKeyPrefix(rh.TableDesc.TableDesc(), + rh.primaryIndexKeyPrefix = sqlbase.MakeIndexKeyPrefix(rh.Codec, rh.TableDesc.TableDesc(), rh.TableDesc.PrimaryIndex.ID) } primaryIndexKey, _, err = sqlbase.EncodeIndexKey( @@ -94,7 +97,14 @@ func (rh *rowHelper) encodeSecondaryIndexes( rh.indexEntries = make([]sqlbase.IndexEntry, 0, len(rh.Indexes)) } rh.indexEntries, err = sqlbase.EncodeSecondaryIndexes( - rh.TableDesc.TableDesc(), rh.Indexes, colIDtoRowIndex, values, rh.indexEntries[:0], includeEmpty) + rh.Codec, + rh.TableDesc.TableDesc(), + rh.Indexes, + colIDtoRowIndex, + values, + rh.indexEntries[:0], + includeEmpty, + ) if err != nil { return nil, err } diff --git a/pkg/sql/row/inserter.go b/pkg/sql/row/inserter.go index 1a67371fcbeb..0f99232770c4 100644 --- a/pkg/sql/row/inserter.go +++ b/pkg/sql/row/inserter.go @@ -14,6 +14,7 @@ import ( "context" "fmt" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -42,6 +43,7 @@ type Inserter struct { func MakeInserter( ctx context.Context, txn *kv.Txn, + codec keys.SQLCodec, tableDesc *sqlbase.ImmutableTableDescriptor, insertCols []sqlbase.ColumnDescriptor, checkFKs checkFKConstraints, @@ -49,7 +51,7 @@ func MakeInserter( alloc *sqlbase.DatumAlloc, ) (Inserter, error) { ri := Inserter{ - Helper: newRowHelper(tableDesc, tableDesc.WritableIndexes()), + Helper: newRowHelper(codec, tableDesc, tableDesc.WritableIndexes()), InsertCols: insertCols, InsertColIDtoRowIndex: ColIDtoRowIndexFromCols(insertCols), marshaled: make([]roachpb.Value, len(insertCols)), @@ -63,7 +65,7 @@ func MakeInserter( if checkFKs == CheckFKs { var err error - if ri.Fks, err = 
makeFkExistenceCheckHelperForInsert(ctx, txn, tableDesc, fkTables, + if ri.Fks, err = makeFkExistenceCheckHelperForInsert(ctx, txn, codec, tableDesc, fkTables, ri.InsertColIDtoRowIndex, alloc); err != nil { return ri, err } diff --git a/pkg/sql/row/row_converter.go b/pkg/sql/row/row_converter.go index 3d898ad39691..d35c87601664 100644 --- a/pkg/sql/row/row_converter.go +++ b/pkg/sql/row/row_converter.go @@ -277,6 +277,7 @@ func NewDatumRowConverter( ri, err := MakeInserter( ctx, nil, /* txn */ + evalCtx.Codec, immutDesc, cols, SkipFKs, diff --git a/pkg/sql/row/updater.go b/pkg/sql/row/updater.go index aa7d4234b82f..8f17629f6bf6 100644 --- a/pkg/sql/row/updater.go +++ b/pkg/sql/row/updater.go @@ -76,6 +76,7 @@ const ( func MakeUpdater( ctx context.Context, txn *kv.Txn, + codec keys.SQLCodec, tableDesc *sqlbase.ImmutableTableDescriptor, fkTables FkTableMetadata, updateCols []sqlbase.ColumnDescriptor, @@ -86,7 +87,7 @@ func MakeUpdater( alloc *sqlbase.DatumAlloc, ) (Updater, error) { rowUpdater, err := makeUpdaterWithoutCascader( - ctx, txn, tableDesc, fkTables, updateCols, requestedCols, updateType, checkFKs, alloc, + ctx, txn, codec, tableDesc, fkTables, updateCols, requestedCols, updateType, checkFKs, alloc, ) if err != nil { return Updater{}, err @@ -113,6 +114,7 @@ var returnTruePseudoError error = returnTrue{} func makeUpdaterWithoutCascader( ctx context.Context, txn *kv.Txn, + codec keys.SQLCodec, tableDesc *sqlbase.ImmutableTableDescriptor, fkTables FkTableMetadata, updateCols []sqlbase.ColumnDescriptor, @@ -178,12 +180,12 @@ func makeUpdaterWithoutCascader( var deleteOnlyHelper *rowHelper if len(deleteOnlyIndexes) > 0 { - rh := newRowHelper(tableDesc, deleteOnlyIndexes) + rh := newRowHelper(codec, tableDesc, deleteOnlyIndexes) deleteOnlyHelper = &rh } ru := Updater{ - Helper: newRowHelper(tableDesc, includeIndexes), + Helper: newRowHelper(codec, tableDesc, includeIndexes), DeleteHelper: deleteOnlyHelper, UpdateCols: updateCols, UpdateColIDtoRowIndex: 
updateColIDtoRowIndex, @@ -199,14 +201,14 @@ func makeUpdaterWithoutCascader( // them, so request them all. var err error if ru.rd, err = makeRowDeleterWithoutCascader( - ctx, txn, tableDesc, fkTables, tableCols, SkipFKs, alloc, + ctx, txn, codec, tableDesc, fkTables, tableCols, SkipFKs, alloc, ); err != nil { return Updater{}, err } ru.FetchCols = ru.rd.FetchCols ru.FetchColIDtoRowIndex = ColIDtoRowIndexFromCols(ru.FetchCols) if ru.ri, err = MakeInserter( - ctx, txn, tableDesc, tableCols, SkipFKs, nil /* fkTables */, alloc, + ctx, txn, codec, tableDesc, tableCols, SkipFKs, nil /* fkTables */, alloc, ); err != nil { return Updater{}, err } @@ -281,8 +283,9 @@ func makeUpdaterWithoutCascader( if primaryKeyColChange { updateCols = nil } - if ru.Fks, err = makeFkExistenceCheckHelperForUpdate(ctx, txn, tableDesc, fkTables, - updateCols, ru.FetchColIDtoRowIndex, alloc); err != nil { + if ru.Fks, err = makeFkExistenceCheckHelperForUpdate( + ctx, txn, codec, tableDesc, fkTables, updateCols, ru.FetchColIDtoRowIndex, alloc, + ); err != nil { return Updater{}, err } } @@ -378,6 +381,7 @@ func (ru *Updater) UpdateRow( // set includeEmpty to false while generating the old // and new index entries. 
ru.oldIndexEntries[i], err = sqlbase.EncodeSecondaryIndex( + ru.Helper.Codec, ru.Helper.TableDesc.TableDesc(), &ru.Helper.Indexes[i], ru.FetchColIDtoRowIndex, @@ -388,6 +392,7 @@ func (ru *Updater) UpdateRow( return nil, err } ru.newIndexEntries[i], err = sqlbase.EncodeSecondaryIndex( + ru.Helper.Codec, ru.Helper.TableDesc.TableDesc(), &ru.Helper.Indexes[i], ru.FetchColIDtoRowIndex, diff --git a/pkg/sql/rowexec/index_skip_table_reader.go b/pkg/sql/rowexec/index_skip_table_reader.go index 8034ac35e462..7b90d8644722 100644 --- a/pkg/sql/rowexec/index_skip_table_reader.go +++ b/pkg/sql/rowexec/index_skip_table_reader.go @@ -125,6 +125,7 @@ func newIndexSkipTableReader( } if err := t.fetcher.Init( + flowCtx.Codec(), t.reverse, spec.LockingStrength, true, /* returnRangeInfo */ diff --git a/pkg/sql/rowexec/index_skip_table_reader_test.go b/pkg/sql/rowexec/index_skip_table_reader_test.go index 805098164300..451178ac8ab0 100644 --- a/pkg/sql/rowexec/index_skip_table_reader_test.go +++ b/pkg/sql/rowexec/index_skip_table_reader_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -168,7 +169,7 @@ func TestIndexSkipTableReader(t *testing.T) { makeIndexSpan := func(td *sqlbase.TableDescriptor, start, end int) execinfrapb.TableReaderSpan { var span roachpb.Span - prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td, td.PrimaryIndex.ID)) + prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, td, td.PrimaryIndex.ID)) span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...) span.EndKey = append(span.EndKey, prefix...) span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...) 
@@ -187,7 +188,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "SimpleForward", tableDesc: td1, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td1.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td1.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, post: execinfrapb.PostProcessSpec{ Projection: true, @@ -200,7 +201,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "InterleavedParent", tableDesc: td5, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td5.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td5.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, post: execinfrapb.PostProcessSpec{ Projection: true, @@ -213,7 +214,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "InterleavedChild", tableDesc: td6, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td6.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td6.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, post: execinfrapb.PostProcessSpec{ Projection: true, @@ -253,7 +254,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "Filter", tableDesc: td1, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td1.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td1.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, post: execinfrapb.PostProcessSpec{ Filter: execinfrapb.Expression{Expr: "@1 > 3 AND @1 < 7"}, @@ -267,7 +268,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "MultipleOutputCols", tableDesc: td2, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td2.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td2.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, post: execinfrapb.PostProcessSpec{ Projection: true, @@ -280,7 +281,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "Nulls", tableDesc: td3, spec: 
execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td3.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td3.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, post: execinfrapb.PostProcessSpec{ Projection: true, @@ -293,7 +294,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "SecondaryIdx", tableDesc: td4, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td4.IndexSpan(2)}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td4.IndexSpan(keys.SystemSQLCodec, 2)}}, IndexIdx: 1, }, post: execinfrapb.PostProcessSpec{ @@ -307,7 +308,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "SimpleReverse", tableDesc: td1, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td1.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td1.PrimaryIndexSpan(keys.SystemSQLCodec)}}, Reverse: true, }, post: execinfrapb.PostProcessSpec{ @@ -350,7 +351,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "InterleavedParentReverse", tableDesc: td5, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td5.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td5.PrimaryIndexSpan(keys.SystemSQLCodec)}}, Reverse: true, }, post: execinfrapb.PostProcessSpec{ @@ -378,7 +379,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "InterleavedChildReverse", tableDesc: td6, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td6.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td6.PrimaryIndexSpan(keys.SystemSQLCodec)}}, Reverse: true, }, post: execinfrapb.PostProcessSpec{ @@ -392,7 +393,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "IndexMultipleNulls", tableDesc: td7, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td7.IndexSpan(2)}}, + Spans: []execinfrapb.TableReaderSpan{{Span: 
td7.IndexSpan(keys.SystemSQLCodec, 2)}}, IndexIdx: 1, }, post: execinfrapb.PostProcessSpec{ @@ -406,7 +407,7 @@ func TestIndexSkipTableReader(t *testing.T) { desc: "IndexAllNulls", tableDesc: td7, spec: execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td7.IndexSpan(3)}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td7.IndexSpan(keys.SystemSQLCodec, 3)}}, IndexIdx: 2, }, post: execinfrapb.PostProcessSpec{ @@ -502,7 +503,7 @@ ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[ NodeID: nodeID, } spec := execinfrapb.IndexSkipTableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan(keys.SystemSQLCodec)}}, Table: *td, } post := execinfrapb.PostProcessSpec{ @@ -630,7 +631,7 @@ func BenchmarkIndexScanTableReader(b *testing.B) { b.Run(fmt.Sprintf("TableReader+Distinct-rows=%d-ratio=%d", numRows, valueRatio), func(b *testing.B) { spec := execinfrapb.TableReaderSpec{ Table: *tableDesc, - Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec)}}, } post := execinfrapb.PostProcessSpec{ Projection: true, @@ -668,7 +669,7 @@ func BenchmarkIndexScanTableReader(b *testing.B) { b.Run(fmt.Sprintf("IndexSkipTableReader-rows=%d-ratio=%d", numRows, valueRatio), func(b *testing.B) { spec := execinfrapb.IndexSkipTableReaderSpec{ Table: *tableDesc, - Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec)}}, } post := execinfrapb.PostProcessSpec{ OutputColumns: []uint32{0}, diff --git a/pkg/sql/rowexec/indexbackfiller.go b/pkg/sql/rowexec/indexbackfiller.go index 40aaa442f0dd..00d57522615c 100644 --- a/pkg/sql/rowexec/indexbackfiller.go +++ b/pkg/sql/rowexec/indexbackfiller.go @@ -78,7 +78,7 @@ func 
newIndexBackfiller( } ib.backfiller.chunks = ib - if err := ib.IndexBackfiller.Init(ib.desc); err != nil { + if err := ib.IndexBackfiller.Init(flowCtx.NewEvalCtx(), ib.desc); err != nil { return nil, err } diff --git a/pkg/sql/rowexec/indexjoiner.go b/pkg/sql/rowexec/indexjoiner.go index 12f66d7a6835..3ad3e1158406 100644 --- a/pkg/sql/rowexec/indexjoiner.go +++ b/pkg/sql/rowexec/indexjoiner.go @@ -99,6 +99,7 @@ func newIndexJoiner( } var fetcher row.Fetcher if _, _, err := initRowFetcher( + flowCtx, &fetcher, &ij.desc, 0, /* primary index */ @@ -122,7 +123,7 @@ func newIndexJoiner( ij.fetcher = &fetcher } - ij.spanBuilder = span.MakeBuilder(&spec.Table, &spec.Table.PrimaryIndex) + ij.spanBuilder = span.MakeBuilder(flowCtx.Codec(), &spec.Table, &spec.Table.PrimaryIndex) ij.spanBuilder.SetNeededColumns(ij.Out.NeededColumns()) return ij, nil diff --git a/pkg/sql/rowexec/interleaved_reader_joiner.go b/pkg/sql/rowexec/interleaved_reader_joiner.go index 84d7fecb0937..0b26c92694d5 100644 --- a/pkg/sql/rowexec/interleaved_reader_joiner.go +++ b/pkg/sql/rowexec/interleaved_reader_joiner.go @@ -367,7 +367,7 @@ func newInterleavedReaderJoiner( } if err := irj.initRowFetcher( - spec.Tables, tables, spec.Reverse, spec.LockingStrength, &irj.alloc, + flowCtx, spec.Tables, tables, spec.Reverse, spec.LockingStrength, &irj.alloc, ); err != nil { return nil, err } @@ -400,6 +400,7 @@ func newInterleavedReaderJoiner( } func (irj *interleavedReaderJoiner) initRowFetcher( + flowCtx *execinfra.FlowCtx, tables []execinfrapb.InterleavedReaderJoinerSpec_Table, tableInfos []tableInfo, reverseScan bool, @@ -427,6 +428,7 @@ func (irj *interleavedReaderJoiner) initRowFetcher( } return irj.fetcher.Init( + flowCtx.Codec(), reverseScan, lockStr, true, /* returnRangeInfo */ diff --git a/pkg/sql/rowexec/interleaved_reader_joiner_test.go b/pkg/sql/rowexec/interleaved_reader_joiner_test.go index 839999162705..e3a4b35edf58 100644 --- a/pkg/sql/rowexec/interleaved_reader_joiner_test.go +++ 
b/pkg/sql/rowexec/interleaved_reader_joiner_test.go @@ -17,6 +17,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -35,7 +36,7 @@ import ( // min and max are inclusive bounds on the root table's ID. // If min and/or max is -1, then no bound is used for that endpoint. func makeSpanWithRootBound(desc *sqlbase.TableDescriptor, min int, max int) roachpb.Span { - keyPrefix := sqlbase.MakeIndexKeyPrefix(desc, desc.PrimaryIndex.ID) + keyPrefix := sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, desc, desc.PrimaryIndex.ID) startKey := roachpb.Key(append([]byte(nil), keyPrefix...)) if min != -1 { @@ -132,12 +133,12 @@ func TestInterleavedReaderJoiner(t *testing.T) { { Desc: *pd, Ordering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{{ColIdx: 0, Direction: execinfrapb.Ordering_Column_ASC}}}, - Spans: []execinfrapb.TableReaderSpan{{Span: pd.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: pd.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, { Desc: *cd1, Ordering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{{ColIdx: 0, Direction: execinfrapb.Ordering_Column_ASC}}}, - Spans: []execinfrapb.TableReaderSpan{{Span: cd1.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: cd1.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, }, Type: sqlbase.InnerJoin, @@ -150,10 +151,10 @@ func TestInterleavedReaderJoiner(t *testing.T) { pdCd2Spec := copySpec(pdCd1Spec) pdCd2Spec.Tables[1].Desc = *cd2 - pdCd2Spec.Tables[1].Spans = []execinfrapb.TableReaderSpan{{Span: cd2.PrimaryIndexSpan()}} + pdCd2Spec.Tables[1].Spans = []execinfrapb.TableReaderSpan{{Span: cd2.PrimaryIndexSpan(keys.SystemSQLCodec)}} pdCd3Spec := copySpec(pdCd1Spec) pdCd3Spec.Tables[1].Desc = *cd3 - pdCd3Spec.Tables[1].Spans = []execinfrapb.TableReaderSpan{{Span: 
cd3.PrimaryIndexSpan()}} + pdCd3Spec.Tables[1].Spans = []execinfrapb.TableReaderSpan{{Span: cd3.PrimaryIndexSpan(keys.SystemSQLCodec)}} testCases := []struct { spec execinfrapb.InterleavedReaderJoinerSpec @@ -476,12 +477,12 @@ func TestInterleavedReaderJoinerErrors(t *testing.T) { { Desc: *pd, Ordering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{{ColIdx: 0, Direction: execinfrapb.Ordering_Column_ASC}}}, - Spans: []execinfrapb.TableReaderSpan{{Span: pd.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: pd.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, { Desc: *cd, Ordering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{{ColIdx: 0, Direction: execinfrapb.Ordering_Column_DESC}}}, - Spans: []execinfrapb.TableReaderSpan{{Span: cd.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: cd.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, }, Type: sqlbase.InnerJoin, @@ -495,7 +496,7 @@ func TestInterleavedReaderJoinerErrors(t *testing.T) { { Desc: *pd, Ordering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{{ColIdx: 0, Direction: execinfrapb.Ordering_Column_ASC}}}, - Spans: []execinfrapb.TableReaderSpan{{Span: pd.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: pd.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, }, Type: sqlbase.InnerJoin, @@ -509,12 +510,12 @@ func TestInterleavedReaderJoinerErrors(t *testing.T) { { Desc: *cd, Ordering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{{ColIdx: 0, Direction: execinfrapb.Ordering_Column_ASC}}}, - Spans: []execinfrapb.TableReaderSpan{{Span: pd.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: pd.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, { Desc: *gcd, Ordering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{{ColIdx: 0, Direction: execinfrapb.Ordering_Column_ASC}}}, - Spans: []execinfrapb.TableReaderSpan{{Span: gcd.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: 
gcd.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, }, Type: sqlbase.InnerJoin, @@ -599,12 +600,12 @@ func TestInterleavedReaderJoinerTrailingMetadata(t *testing.T) { { Desc: *pd, Ordering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{{ColIdx: 0, Direction: execinfrapb.Ordering_Column_ASC}}}, - Spans: []execinfrapb.TableReaderSpan{{Span: pd.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: pd.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, { Desc: *cd, Ordering: execinfrapb.Ordering{Columns: []execinfrapb.Ordering_Column{{ColIdx: 0, Direction: execinfrapb.Ordering_Column_ASC}}}, - Spans: []execinfrapb.TableReaderSpan{{Span: cd.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: cd.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, }, Type: sqlbase.InnerJoin, diff --git a/pkg/sql/rowexec/joinreader.go b/pkg/sql/rowexec/joinreader.go index 0dc8b5d92682..28c77a0ff2d8 100644 --- a/pkg/sql/rowexec/joinreader.go +++ b/pkg/sql/rowexec/joinreader.go @@ -205,7 +205,7 @@ func newJoinReader( var fetcher row.Fetcher _, _, err = initRowFetcher( - &fetcher, &jr.desc, int(spec.IndexIdx), jr.colIdxMap, false, /* reverse */ + flowCtx, &fetcher, &jr.desc, int(spec.IndexIdx), jr.colIdxMap, false, /* reverse */ neededRightCols, false /* isCheck */, &jr.alloc, spec.Visibility, spec.LockingStrength, ) if err != nil { @@ -219,7 +219,7 @@ func newJoinReader( jr.fetcher = &fetcher } - jr.spanBuilder = span.MakeBuilder(&jr.desc, jr.index) + jr.spanBuilder = span.MakeBuilder(flowCtx.Codec(), &jr.desc, jr.index) jr.spanBuilder.SetNeededColumns(jr.neededRightCols()) ctx := flowCtx.EvalCtx.Ctx() diff --git a/pkg/sql/rowexec/rowfetcher.go b/pkg/sql/rowexec/rowfetcher.go index baad616300fd..c44c106e7325 100644 --- a/pkg/sql/rowexec/rowfetcher.go +++ b/pkg/sql/rowexec/rowfetcher.go @@ -16,6 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/sql/execinfra" 
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -53,6 +54,7 @@ type rowFetcher interface { // initRowFetcher initializes the fetcher. func initRowFetcher( + flowCtx *execinfra.FlowCtx, fetcher *row.Fetcher, desc *sqlbase.TableDescriptor, indexIdx int, @@ -83,7 +85,13 @@ func initRowFetcher( ValNeededForCol: valNeededForCol, } if err := fetcher.Init( - reverseScan, lockStr, true /* returnRangeInfo */, isCheck, alloc, tableArgs, + flowCtx.Codec(), + reverseScan, + lockStr, + true, /* returnRangeInfo */ + isCheck, + alloc, + tableArgs, ); err != nil { return nil, false, err } diff --git a/pkg/sql/rowexec/scrub_tablereader.go b/pkg/sql/rowexec/scrub_tablereader.go index 9bc5b2243c78..0e18967c0ad0 100644 --- a/pkg/sql/rowexec/scrub_tablereader.go +++ b/pkg/sql/rowexec/scrub_tablereader.go @@ -121,8 +121,8 @@ func newScrubTableReader( var fetcher row.Fetcher if _, _, err := initRowFetcher( - &fetcher, &tr.tableDesc, int(spec.IndexIdx), tr.tableDesc.ColumnIdxMap(), spec.Reverse, - neededColumns, true /* isCheck */, &tr.alloc, + flowCtx, &fetcher, &tr.tableDesc, int(spec.IndexIdx), tr.tableDesc.ColumnIdxMap(), + spec.Reverse, neededColumns, true /* isCheck */, &tr.alloc, execinfrapb.ScanVisibility_PUBLIC, spec.LockingStrength, ); err != nil { return nil, err diff --git a/pkg/sql/rowexec/tablereader.go b/pkg/sql/rowexec/tablereader.go index 5ce550661a2f..4321f9d0e1e6 100644 --- a/pkg/sql/rowexec/tablereader.go +++ b/pkg/sql/rowexec/tablereader.go @@ -116,7 +116,7 @@ func newTableReader( var fetcher row.Fetcher columnIdxMap := spec.Table.ColumnIdxMapWithMutations(returnMutations) if _, _, err := initRowFetcher( - &fetcher, &spec.Table, int(spec.IndexIdx), columnIdxMap, spec.Reverse, + flowCtx, &fetcher, &spec.Table, int(spec.IndexIdx), columnIdxMap, spec.Reverse, neededColumns, spec.IsCheck, &tr.alloc, spec.Visibility, spec.LockingStrength, ); err != nil { return nil, 
err diff --git a/pkg/sql/rowexec/tablereader_test.go b/pkg/sql/rowexec/tablereader_test.go index 2857cb56ce15..608e16528e05 100644 --- a/pkg/sql/rowexec/tablereader_test.go +++ b/pkg/sql/rowexec/tablereader_test.go @@ -18,6 +18,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -68,7 +69,7 @@ func TestTableReader(t *testing.T) { makeIndexSpan := func(start, end int) execinfrapb.TableReaderSpan { var span roachpb.Span - prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td, td.Indexes[0].ID)) + prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, td, td.Indexes[0].ID)) span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...) span.EndKey = append(span.EndKey, prefix...) span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...) 
@@ -82,7 +83,7 @@ func TestTableReader(t *testing.T) { }{ { spec: execinfrapb.TableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, post: execinfrapb.PostProcessSpec{ Filter: execinfrapb.Expression{Expr: "@3 < 5 AND @2 != 3"}, // sum < 5 && b != 3 @@ -93,7 +94,7 @@ func TestTableReader(t *testing.T) { }, { spec: execinfrapb.TableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan(keys.SystemSQLCodec)}}, }, post: execinfrapb.PostProcessSpec{ Filter: execinfrapb.Expression{Expr: "@3 < 5 AND @2 != 3"}, @@ -219,7 +220,7 @@ ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[ NodeID: nodeID, } spec := execinfrapb.TableReaderSpec{ - Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan(keys.SystemSQLCodec)}}, Table: *td, } post := execinfrapb.PostProcessSpec{ @@ -325,7 +326,7 @@ func TestLimitScans(t *testing.T) { } spec := execinfrapb.TableReaderSpec{ Table: *tableDesc, - Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec)}}, } // We're going to ask for 3 rows, all contained in the first range. 
const limit = 3 @@ -431,7 +432,7 @@ func BenchmarkTableReader(b *testing.B) { b.Run(fmt.Sprintf("rows=%d", numRows), func(b *testing.B) { spec := execinfrapb.TableReaderSpec{ Table: *tableDesc, - Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan()}}, + Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec)}}, } post := execinfrapb.PostProcessSpec{} diff --git a/pkg/sql/rowexec/zigzagjoiner.go b/pkg/sql/rowexec/zigzagjoiner.go index 01ab560959c5..d2bf2d84220c 100644 --- a/pkg/sql/rowexec/zigzagjoiner.go +++ b/pkg/sql/rowexec/zigzagjoiner.go @@ -320,7 +320,7 @@ func newZigzagJoiner( return nil, err } } - if err := z.setupInfo(spec, i, colOffset); err != nil { + if err := z.setupInfo(flowCtx, spec, i, colOffset); err != nil { return nil, err } colOffset += len(z.infos[i].table.Columns) @@ -393,7 +393,7 @@ type zigzagJoinerInfo struct { // to process. It is the number of columns in the tables of all previous sides // of the join. func (z *zigzagJoiner) setupInfo( - spec *execinfrapb.ZigzagJoinerSpec, side int, colOffset int, + flowCtx *execinfra.FlowCtx, spec *execinfrapb.ZigzagJoinerSpec, side int, colOffset int, ) error { z.side = side info := z.infos[side] @@ -438,11 +438,12 @@ func (z *zigzagJoiner) setupInfo( // Setup the RowContainers. info.container.Reset() - info.spanBuilder = span.MakeBuilder(info.table, info.index) + info.spanBuilder = span.MakeBuilder(flowCtx.Codec(), info.table, info.index) // Setup the Fetcher. 
_, _, err := initRowFetcher( - &(info.fetcher), + flowCtx, + &info.fetcher, info.table, int(indexOrdinal), info.table.ColumnIdxMap(), @@ -459,7 +460,7 @@ func (z *zigzagJoiner) setupInfo( return err } - info.prefix = sqlbase.MakeIndexKeyPrefix(info.table, info.index.ID) + info.prefix = sqlbase.MakeIndexKeyPrefix(flowCtx.Codec(), info.table, info.index.ID) span, err := z.produceSpanFromBaseRow() if err != nil { diff --git a/pkg/sql/scatter.go b/pkg/sql/scatter.go index f79189245492..5717e4c5d854 100644 --- a/pkg/sql/scatter.go +++ b/pkg/sql/scatter.go @@ -40,7 +40,7 @@ func (p *planner) Scatter(ctx context.Context, n *tree.Scatter) (planNode, error var span roachpb.Span if n.From == nil { // No FROM/TO specified; the span is the entire table/index. - span = tableDesc.IndexSpan(index.ID) + span = tableDesc.IndexSpan(p.ExecCfg().Codec, index.ID) } else { switch { case len(n.From) == 0: @@ -91,11 +91,11 @@ func (p *planner) Scatter(ctx context.Context, n *tree.Scatter) (planNode, error } } - span.Key, err = getRowKey(tableDesc.TableDesc(), index, fromVals) + span.Key, err = getRowKey(p.ExecCfg().Codec, tableDesc.TableDesc(), index, fromVals) if err != nil { return nil, err } - span.EndKey, err = getRowKey(tableDesc.TableDesc(), index, toVals) + span.EndKey, err = getRowKey(p.ExecCfg().Codec, tableDesc.TableDesc(), index, toVals) if err != nil { return nil, err } diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index d7895d0572a8..a3bbaa2e62b3 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -887,7 +887,7 @@ func (sc *SchemaChanger) done(ctx context.Context) (*sqlbase.ImmutableTableDescr // If we performed MakeMutationComplete on a PrimaryKeySwap mutation, then we need to start // a job for the index deletion mutations that the primary key swap mutation added, if any. 
mutationID := scDesc.ClusterVersion.NextMutationID - span := scDesc.PrimaryIndexSpan() + span := scDesc.PrimaryIndexSpan(sc.execCfg.Codec) var spanList []jobspb.ResumeSpanList for j := len(scDesc.ClusterVersion.Mutations); j < len(scDesc.Mutations); j++ { spanList = append(spanList, @@ -1221,7 +1221,7 @@ func (sc *SchemaChanger) updateJobForRollback( ctx context.Context, txn *kv.Txn, tableDesc *sqlbase.TableDescriptor, ) error { // Initialize refresh spans to scan the entire table. - span := tableDesc.PrimaryIndexSpan() + span := tableDesc.PrimaryIndexSpan(sc.execCfg.Codec) var spanList []jobspb.ResumeSpanList for _, m := range tableDesc.Mutations { if m.MutationID == sc.mutationID { @@ -1495,7 +1495,10 @@ func (*SchemaChangerTestingKnobs) ModuleTestingKnobs() {} // used in the surrounding SQL session, so session tracing is unable // to capture schema change activity. func createSchemaChangeEvalCtx( - ctx context.Context, ts hlc.Timestamp, ieFactory sqlutil.SessionBoundInternalExecutorFactory, + ctx context.Context, + execCfg *ExecutorConfig, + ts hlc.Timestamp, + ieFactory sqlutil.SessionBoundInternalExecutorFactory, ) extendedEvalContext { dummyLocation := time.UTC @@ -1522,6 +1525,7 @@ func createSchemaChangeEvalCtx( // because it sets "enabled: false" and thus none of the // other fields are used. 
Tracing: &SessionTracing{}, + ExecCfg: execCfg, EvalContext: tree.EvalContext{ SessionData: sd, InternalExecutor: ieFactory(ctx, sd), @@ -1532,6 +1536,13 @@ func createSchemaChangeEvalCtx( Planner: &sqlbase.DummyEvalPlanner{}, SessionAccessor: &sqlbase.DummySessionAccessor{}, PrivilegedAccessor: &sqlbase.DummyPrivilegedAccessor{}, + Settings: execCfg.Settings, + TestingKnobs: execCfg.EvalContextTestingKnobs, + ClusterID: execCfg.ClusterID(), + ClusterName: execCfg.RPCContext.ClusterName(), + NodeID: execCfg.NodeID.Get(), + Codec: execCfg.Codec, + Locality: execCfg.Locality, }, } // The backfill is going to use the current timestamp for the various diff --git a/pkg/sql/scrub_physical.go b/pkg/sql/scrub_physical.go index 02a66ae89a8e..1e5cd7f516f1 100644 --- a/pkg/sql/scrub_physical.go +++ b/pkg/sql/scrub_physical.go @@ -112,7 +112,7 @@ func (o *physicalCheckOperation) Start(params runParams) error { return err } scan.index = scan.specifiedIndex - sb := span.MakeBuilder(o.tableDesc.TableDesc(), o.indexDesc) + sb := span.MakeBuilder(params.ExecCfg().Codec, o.tableDesc.TableDesc(), o.indexDesc) scan.spans, err = sb.UnconstrainedSpans() if err != nil { return err diff --git a/pkg/sql/scrub_test.go b/pkg/sql/scrub_test.go index e93b445c2772..b785b9ecc2ee 100644 --- a/pkg/sql/scrub_test.go +++ b/pkg/sql/scrub_test.go @@ -64,7 +64,7 @@ INSERT INTO t."tEst" VALUES (10, 20); // Construct the secondary index key that is currently in the // database. secondaryIndexKey, err := sqlbase.EncodeSecondaryIndex( - tableDesc, secondaryIndex, colIDtoRowIndex, values, true /* includeEmpty */) + keys.SystemSQLCodec, tableDesc, secondaryIndex, colIDtoRowIndex, values, true /* includeEmpty */) if err != nil { t.Fatalf("unexpected error: %s", err) } @@ -132,7 +132,7 @@ CREATE INDEX secondary ON t.test (v); // Construct datums and secondary k/v for our row values (k, v). 
values := []tree.Datum{tree.NewDInt(10), tree.NewDInt(314)} secondaryIndex, err := sqlbase.EncodeSecondaryIndex( - tableDesc, secondaryIndexDesc, colIDtoRowIndex, values, true /* includeEmpty */) + keys.SystemSQLCodec, tableDesc, secondaryIndexDesc, colIDtoRowIndex, values, true /* includeEmpty */) if err != nil { t.Fatalf("unexpected error: %s", err) } @@ -226,7 +226,7 @@ INSERT INTO t.test VALUES (10, 20, 1337); // Generate the existing secondary index key. values := []tree.Datum{tree.NewDInt(10), tree.NewDInt(20), tree.NewDInt(1337)} secondaryIndex, err := sqlbase.EncodeSecondaryIndex( - tableDesc, secondaryIndexDesc, colIDtoRowIndex, values, true /* includeEmpty */) + keys.SystemSQLCodec, tableDesc, secondaryIndexDesc, colIDtoRowIndex, values, true /* includeEmpty */) if len(secondaryIndex) != 1 { t.Fatalf("expected 1 index entry, got %d. got %#v", len(secondaryIndex), secondaryIndex) @@ -243,7 +243,7 @@ INSERT INTO t.test VALUES (10, 20, 1337); // Generate a secondary index k/v that has a different value. values = []tree.Datum{tree.NewDInt(10), tree.NewDInt(20), tree.NewDInt(314)} secondaryIndex, err = sqlbase.EncodeSecondaryIndex( - tableDesc, secondaryIndexDesc, colIDtoRowIndex, values, true /* includeEmpty */) + keys.SystemSQLCodec, tableDesc, secondaryIndexDesc, colIDtoRowIndex, values, true /* includeEmpty */) if err != nil { t.Fatalf("unexpected error: %s", err) } @@ -342,7 +342,8 @@ INSERT INTO t.test VALUES (10, 2); // Create the primary index key. 
values := []tree.Datum{tree.NewDInt(10), tree.NewDInt(2)} - primaryIndexKeyPrefix := sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID) + primaryIndexKeyPrefix := sqlbase.MakeIndexKeyPrefix( + keys.SystemSQLCodec, tableDesc, tableDesc.PrimaryIndex.ID) primaryIndexKey, _, err := sqlbase.EncodeIndexKey( tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, values, primaryIndexKeyPrefix) if err != nil { @@ -444,7 +445,7 @@ func TestScrubFKConstraintFKMissing(t *testing.T) { // Construct the secondary index key entry as it exists in the // database. secondaryIndexKey, err := sqlbase.EncodeSecondaryIndex( - tableDesc, secondaryIndex, colIDtoRowIndex, values, true /* includeEmpty */) + keys.SystemSQLCodec, tableDesc, secondaryIndex, colIDtoRowIndex, values, true /* includeEmpty */) if err != nil { t.Fatalf("unexpected error: %s", err) } @@ -464,7 +465,7 @@ func TestScrubFKConstraintFKMissing(t *testing.T) { // Construct the new secondary index key that will be inserted. secondaryIndexKey, err = sqlbase.EncodeSecondaryIndex( - tableDesc, secondaryIndex, colIDtoRowIndex, values, true /* includeEmpty */) + keys.SystemSQLCodec, tableDesc, secondaryIndex, colIDtoRowIndex, values, true /* includeEmpty */) if err != nil { t.Fatalf("unexpected error: %s", err) } @@ -578,7 +579,8 @@ INSERT INTO t.test VALUES (217, 314); colIDtoRowIndex[tableDesc.Columns[1].ID] = 1 // Create the primary index key - primaryIndexKeyPrefix := sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID) + primaryIndexKeyPrefix := sqlbase.MakeIndexKeyPrefix( + keys.SystemSQLCodec, tableDesc, tableDesc.PrimaryIndex.ID) primaryIndexKey, _, err := sqlbase.EncodeIndexKey( tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, values, primaryIndexKeyPrefix) if err != nil { @@ -659,7 +661,8 @@ INSERT INTO t.test VALUES (217, 314, 1337); colIDtoRowIndex[tableDesc.Columns[2].ID] = 2 // Create the primary index key - primaryIndexKeyPrefix := sqlbase.MakeIndexKeyPrefix(tableDesc, 
tableDesc.PrimaryIndex.ID) + primaryIndexKeyPrefix := sqlbase.MakeIndexKeyPrefix( + keys.SystemSQLCodec, tableDesc, tableDesc.PrimaryIndex.ID) primaryIndexKey, _, err := sqlbase.EncodeIndexKey( tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, values, primaryIndexKeyPrefix) if err != nil { @@ -762,7 +765,8 @@ CREATE TABLE t.test ( colIDtoRowIndex[tableDesc.Columns[1].ID] = 1 // Create the primary index key - primaryIndexKeyPrefix := sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID) + primaryIndexKeyPrefix := sqlbase.MakeIndexKeyPrefix( + keys.SystemSQLCodec, tableDesc, tableDesc.PrimaryIndex.ID) primaryIndexKey, _, err := sqlbase.EncodeIndexKey( tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, values, primaryIndexKeyPrefix) if err != nil { @@ -865,7 +869,8 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v1 INT, v2 INT); colIDtoRowIndex[tableDesc.Columns[2].ID] = 2 // Create the primary index key - primaryIndexKeyPrefix := sqlbase.MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID) + primaryIndexKeyPrefix := sqlbase.MakeIndexKeyPrefix( + keys.SystemSQLCodec, tableDesc, tableDesc.PrimaryIndex.ID) primaryIndexKey, _, err := sqlbase.EncodeIndexKey( tableDesc, &tableDesc.PrimaryIndex, colIDtoRowIndex, values, primaryIndexKeyPrefix) if err != nil { diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go index 7208f11877d6..86d50eb6b271 100644 --- a/pkg/sql/sem/builtins/builtins.go +++ b/pkg/sql/sem/builtins/builtins.go @@ -2864,7 +2864,7 @@ may increase either contention or retry errors, or both.`, } if indexDesc.ID == tableDesc.PrimaryIndex.ID { - keyPrefix := sqlbase.MakeIndexKeyPrefix(tableDesc, indexDesc.ID) + keyPrefix := sqlbase.MakeIndexKeyPrefix(ctx.Codec, tableDesc, indexDesc.ID) res, _, err := sqlbase.EncodeIndexKey(tableDesc, indexDesc, colMap, datums, keyPrefix) if err != nil { return nil, err @@ -2872,7 +2872,7 @@ may increase either contention or retry errors, or both.`, return tree.NewDBytes(tree.DBytes(res)), 
err } // We have a secondary index. - res, err := sqlbase.EncodeSecondaryIndex(tableDesc, indexDesc, colMap, datums, true /* includeEmpty */) + res, err := sqlbase.EncodeSecondaryIndex(ctx.Codec, tableDesc, indexDesc, colMap, datums, true /* includeEmpty */) if err != nil { return nil, err } diff --git a/pkg/sql/sem/tree/eval.go b/pkg/sql/sem/tree/eval.go index 794a1b291e44..173908b5d2e5 100644 --- a/pkg/sql/sem/tree/eval.go +++ b/pkg/sql/sem/tree/eval.go @@ -23,6 +23,7 @@ import ( "github.com/cockroachdb/apd" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" @@ -2829,8 +2830,9 @@ type EvalContext struct { Settings *cluster.Settings ClusterID uuid.UUID - NodeID roachpb.NodeID ClusterName string + NodeID roachpb.NodeID + Codec keys.SQLCodec // Locality contains the location of the current node as a set of user-defined // key/value pairs, ordered from most inclusive to least inclusive. If there @@ -2940,6 +2942,7 @@ func MakeTestingEvalContext(st *cluster.Settings) EvalContext { // EvalContext so do not start or close the memory monitor. 
func MakeTestingEvalContextWithMon(st *cluster.Settings, monitor *mon.BytesMonitor) EvalContext { ctx := EvalContext{ + Codec: keys.SystemSQLCodec, Txn: &kv.Txn{}, SessionData: &sessiondata.SessionData{}, Settings: st, diff --git a/pkg/sql/set_zone_config.go b/pkg/sql/set_zone_config.go index 0484eeef80ad..3771ef568803 100644 --- a/pkg/sql/set_zone_config.go +++ b/pkg/sql/set_zone_config.go @@ -840,7 +840,7 @@ func writeZoneConfig( if len(zone.Subzones) > 0 { st := execCfg.Settings zone.SubzoneSpans, err = GenerateSubzoneSpans( - st, execCfg.ClusterID(), table, zone.Subzones, hasNewSubzones) + st, execCfg.ClusterID(), execCfg.Codec, table, zone.Subzones, hasNewSubzones) if err != nil { return 0, err } diff --git a/pkg/sql/show_create.go b/pkg/sql/show_create.go index 53bc3ba4e565..8dec3b42efd6 100644 --- a/pkg/sql/show_create.go +++ b/pkg/sql/show_create.go @@ -136,7 +136,7 @@ func ShowCreateTable( } } if err := ShowCreatePartitioning( - a, desc, idx, &idx.Partitioning, &f.Buffer, 1 /* indent */, 0, /* colOffset */ + a, p.ExecCfg().Codec, desc, idx, &idx.Partitioning, &f.Buffer, 1 /* indent */, 0, /* colOffset */ ); err != nil { return "", err } @@ -151,7 +151,7 @@ func ShowCreateTable( return "", err } if err := ShowCreatePartitioning( - a, desc, &desc.PrimaryIndex, &desc.PrimaryIndex.Partitioning, &f.Buffer, 0 /* indent */, 0, /* colOffset */ + a, p.ExecCfg().Codec, desc, &desc.PrimaryIndex, &desc.PrimaryIndex.Partitioning, &f.Buffer, 0 /* indent */, 0, /* colOffset */ ); err != nil { return "", err } diff --git a/pkg/sql/show_create_clauses.go b/pkg/sql/show_create_clauses.go index 70e2bd0e7b81..f9af4a3ac44c 100644 --- a/pkg/sql/show_create_clauses.go +++ b/pkg/sql/show_create_clauses.go @@ -282,6 +282,7 @@ func showCreateInterleave( // index, if applicable. 
func ShowCreatePartitioning( a *sqlbase.DatumAlloc, + codec keys.SQLCodec, tableDesc *sqlbase.TableDescriptor, idxDesc *sqlbase.IndexDescriptor, partDesc *sqlbase.PartitioningDescriptor, @@ -334,7 +335,7 @@ func ShowCreatePartitioning( buf.WriteString(`, `) } tuple, _, err := sqlbase.DecodePartitionTuple( - a, tableDesc, idxDesc, partDesc, values, fakePrefixDatums) + a, codec, tableDesc, idxDesc, partDesc, values, fakePrefixDatums) if err != nil { return err } @@ -342,7 +343,7 @@ func ShowCreatePartitioning( } buf.WriteString(`)`) if err := ShowCreatePartitioning( - a, tableDesc, idxDesc, &part.Subpartitioning, buf, indent+1, + a, codec, tableDesc, idxDesc, &part.Subpartitioning, buf, indent+1, colOffset+int(partDesc.NumColumns), ); err != nil { return err @@ -358,14 +359,14 @@ func ShowCreatePartitioning( buf.WriteString(part.Name) buf.WriteString(" VALUES FROM ") fromTuple, _, err := sqlbase.DecodePartitionTuple( - a, tableDesc, idxDesc, partDesc, part.FromInclusive, fakePrefixDatums) + a, codec, tableDesc, idxDesc, partDesc, part.FromInclusive, fakePrefixDatums) if err != nil { return err } buf.WriteString(fromTuple.String()) buf.WriteString(" TO ") toTuple, _, err := sqlbase.DecodePartitionTuple( - a, tableDesc, idxDesc, partDesc, part.ToExclusive, fakePrefixDatums) + a, codec, tableDesc, idxDesc, partDesc, part.ToExclusive, fakePrefixDatums) if err != nil { return err } diff --git a/pkg/sql/span/span_builder.go b/pkg/sql/span/span_builder.go index 1aa3054d7f9a..628248323752 100644 --- a/pkg/sql/span/span_builder.go +++ b/pkg/sql/span/span_builder.go @@ -25,6 +25,7 @@ import ( // Builder is a single struct for generating key spans from Constraints, Datums and encDatums. type Builder struct { + codec keys.SQLCodec table *sqlbase.TableDescriptor index *sqlbase.IndexDescriptor indexColTypes []types.T @@ -48,11 +49,14 @@ var _ = (*Builder).SetNeededFamilies var _ = (*Builder).UnsetNeededFamilies // MakeBuilder creates a Builder for a table and index. 
-func MakeBuilder(table *sqlbase.TableDescriptor, index *sqlbase.IndexDescriptor) *Builder { +func MakeBuilder( + codec keys.SQLCodec, table *sqlbase.TableDescriptor, index *sqlbase.IndexDescriptor, +) *Builder { s := &Builder{ + codec: codec, table: table, index: index, - KeyPrefix: sqlbase.MakeIndexKeyPrefix(table, index.ID), + KeyPrefix: sqlbase.MakeIndexKeyPrefix(codec, table, index.ID), interstices: make([][]byte, len(index.ColumnDirections)+len(index.ExtraColumnIDs)+1), neededFamilies: nil, } @@ -266,7 +270,7 @@ func (s *Builder) appendSpansFromConstraintSpan( // last parent key. If cs.End.Inclusive is true, we also advance the key as // necessary. endInclusive := cs.EndBoundary() == constraint.IncludeBoundary - span.EndKey, err = sqlbase.AdjustEndKeyForInterleave(s.table, s.index, span.EndKey, endInclusive) + span.EndKey, err = sqlbase.AdjustEndKeyForInterleave(s.codec, s.table, s.index, span.EndKey, endInclusive) if err != nil { return nil, err } diff --git a/pkg/sql/span_builder_test.go b/pkg/sql/span_builder_test.go index 8d6e641662cf..7943393feeea 100644 --- a/pkg/sql/span_builder_test.go +++ b/pkg/sql/span_builder_test.go @@ -31,6 +31,7 @@ func TestSpanBuilderCanSplitSpan(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) + execCfg := s.ExecutorConfig().(ExecutorConfig) tcs := []struct { sql string index string @@ -101,7 +102,7 @@ func TestSpanBuilderCanSplitSpan(t *testing.T) { if err != nil { t.Fatal(err) } - builder := span.MakeBuilder(desc, idx) + builder := span.MakeBuilder(execCfg.Codec, desc, idx) if res := builder.CanSplitSpanIntoSeparateFamilies( tc.numNeededFamilies, tc.prefixLen, tc.containsNull); res != tc.canSplit { t.Errorf("expected result to be %v, but found %v", tc.canSplit, res) diff --git a/pkg/sql/split.go b/pkg/sql/split.go index fe3b258fd48c..2a399809a55b 100644 --- a/pkg/sql/split.go +++ b/pkg/sql/split.go @@ -49,7 +49,7 @@ func (n 
*splitNode) Next(params runParams) (bool, error) { return ok, err } - rowKey, err := getRowKey(n.tableDesc, n.index, n.rows.Values()) + rowKey, err := getRowKey(params.ExecCfg().Codec, n.tableDesc, n.index, n.rows.Values()) if err != nil { return false, err } @@ -83,13 +83,16 @@ func (n *splitNode) Close(ctx context.Context) { // getRowKey generates a key that corresponds to a row (or prefix of a row) in a table or index. // Both tableDesc and index are required (index can be the primary index). func getRowKey( - tableDesc *sqlbase.TableDescriptor, index *sqlbase.IndexDescriptor, values []tree.Datum, + codec keys.SQLCodec, + tableDesc *sqlbase.TableDescriptor, + index *sqlbase.IndexDescriptor, + values []tree.Datum, ) ([]byte, error) { colMap := make(map[sqlbase.ColumnID]int) for i := range values { colMap[index.ColumnIDs[i]] = i } - prefix := sqlbase.MakeIndexKeyPrefix(tableDesc, index.ID) + prefix := sqlbase.MakeIndexKeyPrefix(codec, tableDesc, index.ID) key, _, err := sqlbase.EncodePartialIndexKey( tableDesc, index, len(values), colMap, values, prefix, ) diff --git a/pkg/sql/sqlbase/index_encoding.go b/pkg/sql/sqlbase/index_encoding.go index bc6a083f73a2..1d1ceb0da87c 100644 --- a/pkg/sql/sqlbase/index_encoding.go +++ b/pkg/sql/sqlbase/index_encoding.go @@ -34,13 +34,12 @@ import ( // MakeIndexKeyPrefix returns the key prefix used for the index's data. If you // need the corresponding Span, prefer desc.IndexSpan(indexID) or // desc.PrimaryIndexSpan(). 
-func MakeIndexKeyPrefix(desc *TableDescriptor, indexID IndexID) []byte { - keyGen := &keys.TODOSQLCodec +func MakeIndexKeyPrefix(codec keys.SQLCodec, desc *TableDescriptor, indexID IndexID) []byte { if i, err := desc.FindIndexByID(indexID); err == nil && len(i.Interleave.Ancestors) > 0 { ancestor := &i.Interleave.Ancestors[0] - return keyGen.IndexPrefix(uint32(ancestor.TableID), uint32(ancestor.IndexID)) + return codec.IndexPrefix(uint32(ancestor.TableID), uint32(ancestor.IndexID)) } - return keyGen.IndexPrefix(uint32(desc.ID), uint32(indexID)) + return codec.IndexPrefix(uint32(desc.ID), uint32(indexID)) } // EncodeIndexKey creates a key by concatenating keyPrefix with the @@ -513,9 +512,9 @@ func DecodePartialTableIDIndexID(key []byte) ([]byte, ID, IndexID, error) { // // Don't use this function in the scan "hot path". func DecodeIndexKeyPrefix( - desc *TableDescriptor, key []byte, + codec keys.SQLCodec, desc *TableDescriptor, key []byte, ) (indexID IndexID, remaining []byte, err error) { - key, err = keys.TODOSQLCodec.StripTenantPrefix(key) + key, err = codec.StripTenantPrefix(key) if err != nil { return 0, nil, err } @@ -583,6 +582,7 @@ func DecodeIndexKeyPrefix( // empty. If the given descriptor does not match the key, false is returned with // no error. func DecodeIndexKey( + codec keys.SQLCodec, desc *TableDescriptor, index *IndexDescriptor, types []types.T, @@ -590,7 +590,7 @@ func DecodeIndexKey( colDirs []IndexDescriptor_Direction, key []byte, ) (remainingKey []byte, matches bool, foundNull bool, _ error) { - key, err := keys.TODOSQLCodec.StripTenantPrefix(key) + key, err := codec.StripTenantPrefix(key) if err != nil { return nil, false, false, err } @@ -707,9 +707,9 @@ func DecodeKeyVals( // // Don't use this function in the scan "hot path". 
func ExtractIndexKey( - a *DatumAlloc, tableDesc *TableDescriptor, entry kv.KeyValue, + a *DatumAlloc, codec keys.SQLCodec, tableDesc *TableDescriptor, entry kv.KeyValue, ) (roachpb.Key, error) { - indexID, key, err := DecodeIndexKeyPrefix(tableDesc, entry.Key) + indexID, key, err := DecodeIndexKeyPrefix(codec, tableDesc, entry.Key) if err != nil { return nil, err } @@ -734,7 +734,7 @@ func ExtractIndexKey( // find the index id so we can look up the descriptor, and once to extract // the values. Only parse once. var ok bool - _, ok, _, err = DecodeIndexKey(tableDesc, index, indexTypes, values, dirs, entry.Key) + _, ok, _, err = DecodeIndexKey(codec, tableDesc, index, indexTypes, values, dirs, entry.Key) if err != nil { return nil, err } @@ -779,7 +779,7 @@ func ExtractIndexKey( for i, columnID := range index.ExtraColumnIDs { colMap[columnID] = i + len(index.ColumnIDs) } - indexKeyPrefix := MakeIndexKeyPrefix(tableDesc, tableDesc.PrimaryIndex.ID) + indexKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc, tableDesc.PrimaryIndex.ID) decodedValues := make([]tree.Datum, len(values)+len(extraValues)) for i, value := range values { @@ -949,13 +949,14 @@ func encodeGeoKeys(inKey []byte, geoKeys []geoindex.Key) (keys [][]byte, err err // whether or not k/v's with empty values should be returned. // It returns indexEntries in family sorted order. func EncodePrimaryIndex( + codec keys.SQLCodec, tableDesc *TableDescriptor, index *IndexDescriptor, colMap map[ColumnID]int, values []tree.Datum, includeEmpty bool, ) ([]IndexEntry, error) { - keyPrefix := MakeIndexKeyPrefix(tableDesc, index.ID) + keyPrefix := MakeIndexKeyPrefix(codec, tableDesc, index.ID) indexKey, _, err := EncodeIndexKey(tableDesc, index, colMap, values, keyPrefix) if err != nil { return nil, err @@ -1032,17 +1033,18 @@ func EncodePrimaryIndex( // empty values. For forward indexes the returned list of // index entries is in family sorted order. 
func EncodeSecondaryIndex( + codec keys.SQLCodec, tableDesc *TableDescriptor, secondaryIndex *IndexDescriptor, colMap map[ColumnID]int, values []tree.Datum, includeEmpty bool, ) ([]IndexEntry, error) { - secondaryIndexKeyPrefix := MakeIndexKeyPrefix(tableDesc, secondaryIndex.ID) + secondaryIndexKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc, secondaryIndex.ID) // Use the primary key encoding for covering indexes. if secondaryIndex.GetEncodingType(tableDesc.PrimaryIndex.ID) == PrimaryIndexEncoding { - return EncodePrimaryIndex(tableDesc, secondaryIndex, colMap, values, includeEmpty) + return EncodePrimaryIndex(codec, tableDesc, secondaryIndex, colMap, values, includeEmpty) } var containsNull = false @@ -1286,6 +1288,7 @@ func writeColumnValues( // value (passed as a parameter so the caller can reuse between rows) and is // expected to be the same length as indexes. func EncodeSecondaryIndexes( + codec keys.SQLCodec, tableDesc *TableDescriptor, indexes []IndexDescriptor, colMap map[ColumnID]int, @@ -1297,7 +1300,7 @@ func EncodeSecondaryIndexes( panic("Length of secondaryIndexEntries was non-zero") } for i := range indexes { - entries, err := EncodeSecondaryIndex(tableDesc, &indexes[i], colMap, values, includeEmpty) + entries, err := EncodeSecondaryIndex(codec, tableDesc, &indexes[i], colMap, values, includeEmpty) if err != nil { return secondaryIndexEntries, err } @@ -1523,9 +1526,11 @@ func maxKeyTokens(index *IndexDescriptor, containsNull bool) int { // We can thus push forward the start key from /1/#/2 to /2. If the start key // was /1, we cannot push this forwards since that is the first key we want // to read. -func AdjustStartKeyForInterleave(index *IndexDescriptor, start roachpb.Key) (roachpb.Key, error) { +func AdjustStartKeyForInterleave( + codec keys.SQLCodec, index *IndexDescriptor, start roachpb.Key, +) (roachpb.Key, error) { // Remove the tenant prefix before decomposing. 
- strippedStart, err := keys.TODOSQLCodec.StripTenantPrefix(start) + strippedStart, err := codec.StripTenantPrefix(start) if err != nil { return roachpb.Key{}, err } @@ -1566,14 +1571,18 @@ func AdjustStartKeyForInterleave(index *IndexDescriptor, start roachpb.Key) (roa // cause issues when trying to decode the key tokens. // AdjustEndKeyForInterleave is idempotent upon successive invocation(s). func AdjustEndKeyForInterleave( - table *TableDescriptor, index *IndexDescriptor, end roachpb.Key, inclusive bool, + codec keys.SQLCodec, + table *TableDescriptor, + index *IndexDescriptor, + end roachpb.Key, + inclusive bool, ) (roachpb.Key, error) { if index.Type == IndexDescriptor_INVERTED { return end.PrefixEnd(), nil } // Remove the tenant prefix before decomposing. - strippedEnd, err := keys.TODOSQLCodec.StripTenantPrefix(end) + strippedEnd, err := codec.StripTenantPrefix(end) if err != nil { return roachpb.Key{}, err } diff --git a/pkg/sql/sqlbase/partition.go b/pkg/sql/sqlbase/partition.go index 7b9e188eccdb..c0cbde2cb82a 100644 --- a/pkg/sql/sqlbase/partition.go +++ b/pkg/sql/sqlbase/partition.go @@ -13,6 +13,7 @@ package sqlbase import ( "fmt" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -102,6 +103,7 @@ func (t *PartitionTuple) String() string { // partitioning and MINVALUE/MAXVALUE. 
func DecodePartitionTuple( a *DatumAlloc, + codec keys.SQLCodec, tableDesc *TableDescriptor, idxDesc *IndexDescriptor, partDesc *PartitioningDescriptor, @@ -161,7 +163,7 @@ func DecodePartitionTuple( colMap[idxDesc.ColumnIDs[i]] = i } - indexKeyPrefix := MakeIndexKeyPrefix(tableDesc, idxDesc.ID) + indexKeyPrefix := MakeIndexKeyPrefix(codec, tableDesc, idxDesc.ID) key, _, err := EncodePartialIndexKey( tableDesc, idxDesc, len(allDatums), colMap, allDatums, indexKeyPrefix) if err != nil { diff --git a/pkg/sql/sqlbase/structured.go b/pkg/sql/sqlbase/structured.go index 42bbc9f137fb..b53b6271d193 100644 --- a/pkg/sql/sqlbase/structured.go +++ b/pkg/sql/sqlbase/structured.go @@ -2147,6 +2147,11 @@ func (desc *TableDescriptor) validatePartitioningDescriptor( return nil } + // Use the system-tenant SQL codec when validating the keys in the partition + // descriptor. We just want to know how the partitions relate to one another, + // so it's fine to ignore the tenant ID prefix. + codec := keys.SystemSQLCodec + if len(partDesc.List) > 0 { listValues := make(map[string]struct{}, len(partDesc.List)) for _, p := range partDesc.List { @@ -2161,7 +2166,7 @@ func (desc *TableDescriptor) validatePartitioningDescriptor( // to match the behavior of the value when indexed. for _, valueEncBuf := range p.Values { tuple, keyPrefix, err := DecodePartitionTuple( - a, desc, idxDesc, partDesc, valueEncBuf, fakePrefixDatums) + a, codec, desc, idxDesc, partDesc, valueEncBuf, fakePrefixDatums) if err != nil { return fmt.Errorf("PARTITION %s: %v", p.Name, err) } @@ -2190,12 +2195,12 @@ func (desc *TableDescriptor) validatePartitioningDescriptor( // NB: key encoding is used to check uniqueness because it has to match // the behavior of the value when indexed. 
fromDatums, fromKey, err := DecodePartitionTuple( - a, desc, idxDesc, partDesc, p.FromInclusive, fakePrefixDatums) + a, codec, desc, idxDesc, partDesc, p.FromInclusive, fakePrefixDatums) if err != nil { return fmt.Errorf("PARTITION %s: %v", p.Name, err) } toDatums, toKey, err := DecodePartitionTuple( - a, desc, idxDesc, partDesc, p.ToExclusive, fakePrefixDatums) + a, codec, desc, idxDesc, partDesc, p.ToExclusive, fakePrefixDatums) if err != nil { return fmt.Errorf("PARTITION %s: %v", p.Name, err) } @@ -3580,10 +3585,10 @@ func (desc *TableDescriptor) InvalidateFKConstraints() { // AllIndexSpans returns the Spans for each index in the table, including those // being added in the mutations. -func (desc *TableDescriptor) AllIndexSpans() roachpb.Spans { +func (desc *TableDescriptor) AllIndexSpans(codec keys.SQLCodec) roachpb.Spans { var spans roachpb.Spans err := desc.ForeachNonDropIndex(func(index *IndexDescriptor) error { - spans = append(spans, desc.IndexSpan(index.ID)) + spans = append(spans, desc.IndexSpan(codec, index.ID)) return nil }) if err != nil { @@ -3594,20 +3599,22 @@ func (desc *TableDescriptor) AllIndexSpans() roachpb.Spans { // PrimaryIndexSpan returns the Span that corresponds to the entire primary // index; can be used for a full table scan. -func (desc *TableDescriptor) PrimaryIndexSpan() roachpb.Span { - return desc.IndexSpan(desc.PrimaryIndex.ID) +func (desc *TableDescriptor) PrimaryIndexSpan(codec keys.SQLCodec) roachpb.Span { + return desc.IndexSpan(codec, desc.PrimaryIndex.ID) } // IndexSpan returns the Span that corresponds to an entire index; can be used // for a full index scan. 
-func (desc *TableDescriptor) IndexSpan(indexID IndexID) roachpb.Span { - prefix := roachpb.Key(MakeIndexKeyPrefix(desc, indexID)) +func (desc *TableDescriptor) IndexSpan(codec keys.SQLCodec, indexID IndexID) roachpb.Span { + prefix := roachpb.Key(MakeIndexKeyPrefix(codec, desc, indexID)) return roachpb.Span{Key: prefix, EndKey: prefix.PrefixEnd()} } // TableSpan returns the Span that corresponds to the entire table. -func (desc *TableDescriptor) TableSpan() roachpb.Span { - prefix := keys.TODOSQLCodec.TablePrefix(uint32(desc.ID)) +func (desc *TableDescriptor) TableSpan(codec keys.SQLCodec) roachpb.Span { + // TODO(jordan): Why does IndexSpan consider interleaves but TableSpan does + // not? Should it? + prefix := codec.TablePrefix(uint32(desc.ID)) return roachpb.Span{Key: prefix, EndKey: prefix.PrefixEnd()} } diff --git a/pkg/sql/sqlbase/table_test.go b/pkg/sql/sqlbase/table_test.go index 52fb9b750858..bdff195a9c5c 100644 --- a/pkg/sql/sqlbase/table_test.go +++ b/pkg/sql/sqlbase/table_test.go @@ -21,6 +21,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -101,7 +102,7 @@ func makeTableDescForTest(test indexKeyTest) (TableDescriptor, map[ColumnID]int) } func decodeIndex( - tableDesc *TableDescriptor, index *IndexDescriptor, key []byte, + codec keys.SQLCodec, tableDesc *TableDescriptor, index *IndexDescriptor, key []byte, ) ([]tree.Datum, error) { types, err := GetColumnTypes(tableDesc, index.ColumnIDs) if err != nil { @@ -109,7 +110,7 @@ func decodeIndex( } values := make([]EncDatum, len(index.ColumnIDs)) colDirs := index.ColumnDirections - _, ok, _, err := DecodeIndexKey(tableDesc, index, types, values, colDirs, key) + _, ok, _, err := DecodeIndexKey(codec, tableDesc, index, types, values, colDirs, key) if err != nil { return nil, err } @@ -214,7 +215,8 @@ 
func TestIndexKey(t *testing.T) { testValues := append(test.primaryValues, test.secondaryValues...) - primaryKeyPrefix := MakeIndexKeyPrefix(&tableDesc, tableDesc.PrimaryIndex.ID) + codec := keys.SystemSQLCodec + primaryKeyPrefix := MakeIndexKeyPrefix(codec, &tableDesc, tableDesc.PrimaryIndex.ID) primaryKey, _, err := EncodeIndexKey( &tableDesc, &tableDesc.PrimaryIndex, colMap, testValues, primaryKeyPrefix) if err != nil { @@ -224,7 +226,7 @@ func TestIndexKey(t *testing.T) { primaryIndexKV := kv.KeyValue{Key: primaryKey, Value: &primaryValue} secondaryIndexEntry, err := EncodeSecondaryIndex( - &tableDesc, &tableDesc.Indexes[0], colMap, testValues, true /* includeEmpty */) + codec, &tableDesc, &tableDesc.Indexes[0], colMap, testValues, true /* includeEmpty */) if len(secondaryIndexEntry) != 1 { t.Fatalf("expected 1 index entry, got %d. got %#v", len(secondaryIndexEntry), secondaryIndexEntry) } @@ -237,7 +239,7 @@ func TestIndexKey(t *testing.T) { } checkEntry := func(index *IndexDescriptor, entry kv.KeyValue) { - values, err := decodeIndex(&tableDesc, index, entry.Key) + values, err := decodeIndex(codec, &tableDesc, index, entry.Key) if err != nil { t.Fatal(err) } @@ -249,7 +251,7 @@ func TestIndexKey(t *testing.T) { } } - indexID, _, err := DecodeIndexKeyPrefix(&tableDesc, entry.Key) + indexID, _, err := DecodeIndexKeyPrefix(codec, &tableDesc, entry.Key) if err != nil { t.Fatal(err) } @@ -257,7 +259,7 @@ func TestIndexKey(t *testing.T) { t.Errorf("%d", i) } - extracted, err := ExtractIndexKey(&a, &tableDesc, entry) + extracted, err := ExtractIndexKey(&a, codec, &tableDesc, entry) if err != nil { t.Fatal(err) } @@ -666,7 +668,7 @@ func TestIndexKeyEquivSignature(t *testing.T) { tc.table.indexKeyArgs.primaryValues = tc.table.values // Setup descriptors and form an index key. 
desc, colMap := makeTableDescForTest(tc.table.indexKeyArgs) - primaryKeyPrefix := MakeIndexKeyPrefix(&desc, desc.PrimaryIndex.ID) + primaryKeyPrefix := MakeIndexKeyPrefix(keys.SystemSQLCodec, &desc, desc.PrimaryIndex.ID) primaryKey, _, err := EncodeIndexKey( &desc, &desc.PrimaryIndex, colMap, tc.table.values, primaryKeyPrefix) if err != nil { @@ -807,7 +809,7 @@ func TestEquivSignature(t *testing.T) { // Setup descriptors and form an index key. desc, colMap := makeTableDescForTest(table.indexKeyArgs) - primaryKeyPrefix := MakeIndexKeyPrefix(&desc, desc.PrimaryIndex.ID) + primaryKeyPrefix := MakeIndexKeyPrefix(keys.SystemSQLCodec, &desc, desc.PrimaryIndex.ID) primaryKey, _, err := EncodeIndexKey( &desc, &desc.PrimaryIndex, colMap, table.values, primaryKeyPrefix) if err != nil { @@ -1055,12 +1057,14 @@ func TestAdjustStartKeyForInterleave(t *testing.T) { for i, tc := range testCases { t.Run(strconv.Itoa(i), func(t *testing.T) { - actual, err := AdjustStartKeyForInterleave(tc.index, EncodeTestKey(t, kvDB, ShortToLongKeyFmt(tc.input))) + codec := keys.SystemSQLCodec + actual := EncodeTestKey(t, kvDB, codec, ShortToLongKeyFmt(tc.input)) + actual, err := AdjustStartKeyForInterleave(codec, tc.index, actual) if err != nil { t.Fatal(err) } - expected := EncodeTestKey(t, kvDB, ShortToLongKeyFmt(tc.expected)) + expected := EncodeTestKey(t, kvDB, codec, ShortToLongKeyFmt(tc.expected)) if !expected.Equal(actual) { t.Errorf("expected tightened start key %s, got %s", expected, actual) } @@ -1475,12 +1479,14 @@ func TestAdjustEndKeyForInterleave(t *testing.T) { for i, tc := range testCases { t.Run(strconv.Itoa(i), func(t *testing.T) { - actual, err := AdjustEndKeyForInterleave(tc.table, tc.index, EncodeTestKey(t, kvDB, ShortToLongKeyFmt(tc.input)), tc.inclusive) + codec := keys.SystemSQLCodec + actual := EncodeTestKey(t, kvDB, codec, ShortToLongKeyFmt(tc.input)) + actual, err := AdjustEndKeyForInterleave(codec, tc.table, tc.index, actual, tc.inclusive) if err != nil { t.Fatal(err) 
} - expected := EncodeTestKey(t, kvDB, ShortToLongKeyFmt(tc.expected)) + expected := EncodeTestKey(t, kvDB, codec, ShortToLongKeyFmt(tc.expected)) if !expected.Equal(actual) { t.Errorf("expected tightened end key %s, got %s", expected, actual) } diff --git a/pkg/sql/sqlbase/testutils.go b/pkg/sql/sqlbase/testutils.go index 5dbdc6ec0dea..429d4c98f68d 100644 --- a/pkg/sql/sqlbase/testutils.go +++ b/pkg/sql/sqlbase/testutils.go @@ -27,6 +27,7 @@ import ( "github.com/cockroachdb/apd" "github.com/cockroachdb/cockroach/pkg/geo" "github.com/cockroachdb/cockroach/pkg/geo/geopb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -964,7 +965,7 @@ func TestingMakePrimaryIndexKey(desc *TableDescriptor, vals ...interface{}) (roa colIDToRowIndex[index.ColumnIDs[i]] = i } - keyPrefix := MakeIndexKeyPrefix(desc, index.ID) + keyPrefix := MakeIndexKeyPrefix(keys.SystemSQLCodec, desc, index.ID) key, _, err := EncodeIndexKey(desc, index, colIDToRowIndex, datums, keyPrefix) if err != nil { return nil, err diff --git a/pkg/sql/sqlbase/utils_test.go b/pkg/sql/sqlbase/utils_test.go index 9f62301e7549..c4b3db8e9ad6 100644 --- a/pkg/sql/sqlbase/utils_test.go +++ b/pkg/sql/sqlbase/utils_test.go @@ -43,8 +43,8 @@ var tableNames = map[string]bool{ // - 'd' first byte - decimal (ascending) // - NULLASC, NULLDESC, NOTNULLASC, NOTNULLDESC // - PrefixEnd -func EncodeTestKey(tb testing.TB, kvDB *kv.DB, keyStr string) roachpb.Key { - key := keys.SystemSQLCodec.TenantPrefix() +func EncodeTestKey(tb testing.TB, kvDB *kv.DB, codec keys.SQLCodec, keyStr string) roachpb.Key { + key := codec.TenantPrefix() tokens := strings.Split(keyStr, "/") if tokens[0] != "" { panic("missing '/' token at the beginning of long format") diff --git a/pkg/sql/table.go b/pkg/sql/table.go index 1a4300ad7a53..20b344666fa6 100644 --- a/pkg/sql/table.go +++ b/pkg/sql/table.go @@ -956,7 
+956,7 @@ func (p *planner) createOrUpdateSchemaChangeJob( if jobExists { spanList = job.Details().(jobspb.SchemaChangeDetails).ResumeSpanList } - span := tableDesc.PrimaryIndexSpan() + span := tableDesc.PrimaryIndexSpan(p.ExecCfg().Codec) for i := len(tableDesc.ClusterVersion.Mutations) + len(spanList); i < len(tableDesc.Mutations); i++ { spanList = append(spanList, jobspb.ResumeSpanList{ diff --git a/pkg/sql/tablewriter_delete.go b/pkg/sql/tablewriter_delete.go index 969fa17a1676..db1c2c8eb023 100644 --- a/pkg/sql/tablewriter_delete.go +++ b/pkg/sql/tablewriter_delete.go @@ -146,7 +146,7 @@ func (td *tableDeleter) deleteAllRowsScan( ctx context.Context, resume roachpb.Span, limit int64, traceKV bool, ) (roachpb.Span, error) { if resume.Key == nil { - resume = td.rd.Helper.TableDesc.PrimaryIndexSpan() + resume = td.rd.Helper.TableDesc.PrimaryIndexSpan(td.rd.Helper.Codec) } var valNeededForCol util.FastIntSet @@ -163,6 +163,7 @@ func (td *tableDeleter) deleteAllRowsScan( ValNeededForCol: valNeededForCol, } if err := rf.Init( + td.rd.Helper.Codec, false, /* reverse */ // TODO(nvanbenschoten): it might make sense to use a FOR_UPDATE locking // strength here. Consider hooking this in to the same knob that will @@ -226,7 +227,7 @@ func (td *tableDeleter) deleteIndexFast( ctx context.Context, idx *sqlbase.IndexDescriptor, resume roachpb.Span, limit int64, traceKV bool, ) (roachpb.Span, error) { if resume.Key == nil { - resume = td.rd.Helper.TableDesc.IndexSpan(idx.ID) + resume = td.rd.Helper.TableDesc.IndexSpan(td.rd.Helper.Codec, idx.ID) } if traceKV { @@ -248,7 +249,7 @@ func (td *tableDeleter) clearIndex(ctx context.Context, idx *sqlbase.IndexDescri return errors.Errorf("unexpected interleaved index %d", idx.ID) } - sp := td.rd.Helper.TableDesc.IndexSpan(idx.ID) + sp := td.rd.Helper.TableDesc.IndexSpan(td.rd.Helper.Codec, idx.ID) // ClearRange cannot be run in a transaction, so create a // non-transactional batch to send the request. 
@@ -266,7 +267,7 @@ func (td *tableDeleter) deleteIndexScan( ctx context.Context, idx *sqlbase.IndexDescriptor, resume roachpb.Span, limit int64, traceKV bool, ) (roachpb.Span, error) { if resume.Key == nil { - resume = td.rd.Helper.TableDesc.PrimaryIndexSpan() + resume = td.rd.Helper.TableDesc.PrimaryIndexSpan(td.rd.Helper.Codec) } var valNeededForCol util.FastIntSet @@ -283,6 +284,7 @@ func (td *tableDeleter) deleteIndexScan( ValNeededForCol: valNeededForCol, } if err := rf.Init( + td.rd.Helper.Codec, false, /* reverse */ // TODO(nvanbenschoten): it might make sense to use a FOR_UPDATE locking // strength here. Consider hooking this in to the same knob that will diff --git a/pkg/sql/tablewriter_upsert_opt.go b/pkg/sql/tablewriter_upsert_opt.go index a7ce5f786bc3..578aa1bb9bff 100644 --- a/pkg/sql/tablewriter_upsert_opt.go +++ b/pkg/sql/tablewriter_upsert_opt.go @@ -160,7 +160,9 @@ func (tu *optTableUpserter) init( evalCtx.Mon.MakeBoundAccount(), sqlbase.ColTypeInfoFromColDescs(tu.ri.InsertCols), 0, ) - tu.indexKeyPrefix = sqlbase.MakeIndexKeyPrefix(tableDesc.TableDesc(), tableDesc.PrimaryIndex.ID) + tu.indexKeyPrefix = sqlbase.MakeIndexKeyPrefix( + evalCtx.Codec, tableDesc.TableDesc(), tableDesc.PrimaryIndex.ID, + ) if tu.collectRows { tu.resultRow = make(tree.Datums, len(tu.returnCols)) diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go index 418a9918bdb0..3ed2a598e59e 100644 --- a/pkg/sql/truncate.go +++ b/pkg/sql/truncate.go @@ -14,6 +14,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/config" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -406,7 +407,11 @@ func reassignComments( // can even eliminate the need to use a transaction for each chunk at a later // stage if it proves inefficient). 
func ClearTableDataInChunks( - ctx context.Context, tableDesc *sqlbase.TableDescriptor, db *kv.DB, traceKV bool, + ctx context.Context, + db *kv.DB, + codec keys.SQLCodec, + tableDesc *sqlbase.TableDescriptor, + traceKV bool, ) error { const chunkSize = TableTruncateChunkSize var resume roachpb.Span @@ -420,6 +425,7 @@ func ClearTableDataInChunks( rd, err := row.MakeDeleter( ctx, txn, + codec, sqlbase.NewImmutableTableDescriptor(*tableDesc), nil, nil, diff --git a/pkg/sql/unsplit.go b/pkg/sql/unsplit.go index 43628a565003..627a60229d52 100644 --- a/pkg/sql/unsplit.go +++ b/pkg/sql/unsplit.go @@ -43,7 +43,7 @@ func (n *unsplitNode) Next(params runParams) (bool, error) { } row := n.rows.Values() - rowKey, err := getRowKey(n.tableDesc, n.index, row) + rowKey, err := getRowKey(params.ExecCfg().Codec, n.tableDesc, n.index, row) if err != nil { return false, err }