From 28d171ef85b9cc4d4d4be452e99e0edba5d9bfdd Mon Sep 17 00:00:00 2001
From: Nathan VanBenschoten
Date: Thu, 30 Apr 2020 16:12:43 -0400
Subject: [PATCH 1/2] sql: replace a few keys.TODOSQLCodec references with proper codecs

These were all pretty easy because there was an ExecutorConfig nearby.
---
 pkg/sql/crdb_internal.go          |  4 ++--
 pkg/sql/gcjob/refresh_statuses.go |  2 +-
 pkg/sql/partition_utils.go        |  2 +-
 pkg/sql/sequence.go               | 12 ++++++------
 pkg/sql/sequence_select.go        |  2 +-
 pkg/sql/tablewriter_delete.go     |  3 +--
 6 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go
index 8d8d48a18232..92646063b18a 100644
--- a/pkg/sql/crdb_internal.go
+++ b/pkg/sql/crdb_internal.go
@@ -2213,12 +2213,12 @@ CREATE TABLE crdb_internal.ranges_no_leases (
 			}
 			var dbName, tableName, indexName string
-			if _, tableID, err := keys.TODOSQLCodec.DecodeTablePrefix(desc.StartKey.AsRawKey()); err == nil {
+			if _, tableID, err := p.ExecCfg().Codec.DecodeTablePrefix(desc.StartKey.AsRawKey()); err == nil {
 				parent := parents[tableID]
 				if parent != 0 {
 					tableName = tableNames[tableID]
 					dbName = dbNames[parent]
-					if _, _, idxID, err := keys.TODOSQLCodec.DecodeIndexPrefix(desc.StartKey.AsRawKey()); err == nil {
+					if _, _, idxID, err := p.ExecCfg().Codec.DecodeIndexPrefix(desc.StartKey.AsRawKey()); err == nil {
 						indexName = indexNames[tableID][idxID]
 					}
 				} else {
diff --git a/pkg/sql/gcjob/refresh_statuses.go b/pkg/sql/gcjob/refresh_statuses.go
index 528a49301aed..91d0a29619ec 100644
--- a/pkg/sql/gcjob/refresh_statuses.go
+++ b/pkg/sql/gcjob/refresh_statuses.go
@@ -273,7 +273,7 @@ func isProtected(
 func setupConfigWatcher(
 	execCfg *sql.ExecutorConfig,
 ) (gossip.SystemConfigDeltaFilter, <-chan struct{}) {
-	k := keys.TODOSQLCodec.IndexPrefix(uint32(keys.ZonesTableID), uint32(keys.ZonesTablePrimaryIndexID))
+	k := execCfg.Codec.IndexPrefix(uint32(keys.ZonesTableID), uint32(keys.ZonesTablePrimaryIndexID))
 	zoneCfgFilter := gossip.MakeSystemConfigDeltaFilter(k)
 	gossipUpdateC := execCfg.Gossip.Deprecated(47150).RegisterSystemConfigChannel()
 	return zoneCfgFilter, gossipUpdateC
diff --git a/pkg/sql/partition_utils.go b/pkg/sql/partition_utils.go
index 72c10e44cde7..bd734afc15e9 100644
--- a/pkg/sql/partition_utils.go
+++ b/pkg/sql/partition_utils.go
@@ -135,7 +135,7 @@ func GenerateSubzoneSpans(
 	// NB: This assumes that none of the indexes are interleaved, which is
 	// checked in PartitionDescriptor validation.
- sharedPrefix := keys.TODOSQLCodec.TablePrefix(uint32(tableDesc.ID)) + sharedPrefix := codec.TablePrefix(uint32(tableDesc.ID)) var subzoneSpans []zonepb.SubzoneSpan for _, r := range ranges { diff --git a/pkg/sql/sequence.go b/pkg/sql/sequence.go index 804799eccf3b..c540aaeb2807 100644 --- a/pkg/sql/sequence.go +++ b/pkg/sql/sequence.go @@ -50,7 +50,7 @@ func (p *planner) IncrementSequence(ctx context.Context, seqName *tree.TableName rowid := builtins.GenerateUniqueInt(p.EvalContext().NodeID.SQLInstanceID()) val = int64(rowid) } else { - seqValueKey := keys.TODOSQLCodec.SequenceKey(uint32(descriptor.ID)) + seqValueKey := p.ExecCfg().Codec.SequenceKey(uint32(descriptor.ID)) val, err = kv.IncrementValRetryable( ctx, p.txn.DB(), seqValueKey, seqOpts.Increment) if err != nil { @@ -135,7 +135,7 @@ func (p *planner) SetSequenceValue( `cannot set the value of virtual sequence %q`, tree.ErrString(seqName)) } - seqValueKey, newVal, err := MakeSequenceKeyVal(descriptor.TableDesc(), newVal, isCalled) + seqValueKey, newVal, err := MakeSequenceKeyVal(p.ExecCfg().Codec, descriptor.TableDesc(), newVal, isCalled) if err != nil { return err } @@ -149,7 +149,7 @@ func (p *planner) SetSequenceValue( // MakeSequenceKeyVal returns the key and value of a sequence being set // with newVal. func MakeSequenceKeyVal( - sequence *TableDescriptor, newVal int64, isCalled bool, + codec keys.SQLCodec, sequence *TableDescriptor, newVal int64, isCalled bool, ) ([]byte, int64, error) { opts := sequence.SequenceOpts if newVal > opts.MaxValue || newVal < opts.MinValue { @@ -163,18 +163,18 @@ func MakeSequenceKeyVal( newVal = newVal - sequence.SequenceOpts.Increment } - seqValueKey := keys.TODOSQLCodec.SequenceKey(uint32(sequence.ID)) + seqValueKey := codec.SequenceKey(uint32(sequence.ID)) return seqValueKey, newVal, nil } // GetSequenceValue returns the current value of the sequence. 
 func (p *planner) GetSequenceValue(
-	ctx context.Context, desc *sqlbase.ImmutableTableDescriptor,
+	ctx context.Context, codec keys.SQLCodec, desc *sqlbase.ImmutableTableDescriptor,
 ) (int64, error) {
 	if desc.SequenceOpts == nil {
 		return 0, errors.New("descriptor is not a sequence")
 	}
-	keyValue, err := p.txn.Get(ctx, keys.TODOSQLCodec.SequenceKey(uint32(desc.ID)))
+	keyValue, err := p.txn.Get(ctx, codec.SequenceKey(uint32(desc.ID)))
 	if err != nil {
 		return 0, err
 	}
diff --git a/pkg/sql/sequence_select.go b/pkg/sql/sequence_select.go
index a364291e6323..63f5bf790d12 100644
--- a/pkg/sql/sequence_select.go
+++ b/pkg/sql/sequence_select.go
@@ -46,7 +46,7 @@ func (ss *sequenceSelectNode) Next(params runParams) (bool, error) {
 	if ss.done {
 		return false, nil
 	}
-	val, err := params.p.GetSequenceValue(params.ctx, ss.desc)
+	val, err := params.p.GetSequenceValue(params.ctx, params.ExecCfg().Codec, ss.desc)
 	if err != nil {
 		return false, err
 	}
diff --git a/pkg/sql/tablewriter_delete.go b/pkg/sql/tablewriter_delete.go
index db1c2c8eb023..c93d619ecfa4 100644
--- a/pkg/sql/tablewriter_delete.go
+++ b/pkg/sql/tablewriter_delete.go
@@ -14,7 +14,6 @@ import (
 	"context"
 	"fmt"

-	"github.com/cockroachdb/cockroach/pkg/keys"
 	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
 	"github.com/cockroachdb/cockroach/pkg/sql/row"
@@ -122,7 +121,7 @@ func (td *tableDeleter) deleteAllRowsFast(
 	ctx context.Context, resume roachpb.Span, limit int64, traceKV bool,
 ) (roachpb.Span, error) {
 	if resume.Key == nil {
-		tablePrefix := keys.TODOSQLCodec.TablePrefix(uint32(td.rd.Helper.TableDesc.ID))
+		tablePrefix := td.rd.Helper.Codec.TablePrefix(uint32(td.rd.Helper.TableDesc.ID))
 		// Delete rows and indexes starting with the table's prefix.
 		resume = roachpb.Span{
 			Key: tablePrefix,

From f618c80c2fde4e2d7ff3715f3753801919f16ef6 Mon Sep 17 00:00:00 2001
From: Nathan VanBenschoten
Date: Mon, 4 May 2020 16:30:52 -0400
Subject: [PATCH 2/2] sql: push tenant-bound SQL codec into descriptor key generation

Informs #48123.

This commit continues with the plumbing that began in #48190. It pushes a
tenant-bound SQL codec into the other main source of key generation in the
SQL layer: descriptor manipulation and metadata handling. This allows SQL
tenants to properly handle metadata descriptors for their databases and
tables.

This ended up being a larger undertaking than I had originally expected.
However, now that it's complete, we're in a pretty good spot:
1. `sqlbase.MetadataSchema` is ready to be used for #47904.
2. we can now run SQL migrations for a non-system tenant
3. there is only one remaining use of TODOSQLCodec in pkg/sql. See #48375.
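
As a rough illustration of the new plumbing (a minimal sketch, not part of
the diffs below; the helper name is made up, and only in-tree packages and
calls that already appear in this patch are assumed):

    package example // illustrative sketch only; not part of this patch

    import (
        "github.com/cockroachdb/cockroach/pkg/keys"
        "github.com/cockroachdb/cockroach/pkg/roachpb"
        "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
    )

    // exampleMetadataKeys shows how descriptor and sequence keys are now
    // derived from an explicit SQLCodec: a tenant-bound codec prefixes
    // every key it generates with that tenant's key prefix, while
    // keys.SystemSQLCodec produces the un-prefixed system-tenant keys.
    func exampleMetadataKeys(codec keys.SQLCodec, id sqlbase.ID) (descKey, seqKey roachpb.Key) {
        descKey = sqlbase.MakeDescMetadataKey(codec, id) // row in system.descriptor
        seqKey = codec.SequenceKey(uint32(id))           // sequence value key
        return descKey, seqKey
    }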
--- pkg/ccl/backupccl/backup_test.go | 2 +- pkg/ccl/backupccl/restore_job.go | 34 ++--- pkg/ccl/backupccl/restore_planning.go | 25 ++-- pkg/ccl/backupccl/show.go | 2 +- pkg/ccl/backupccl/show_test.go | 4 +- pkg/ccl/backupccl/targets.go | 12 +- pkg/ccl/changefeedccl/bench_test.go | 2 +- pkg/ccl/changefeedccl/changefeed_dist.go | 2 +- .../changefeedccl/schemafeed/schema_feed.go | 2 +- pkg/ccl/importccl/import_stmt.go | 23 +-- pkg/ccl/importccl/load_test.go | 9 +- pkg/ccl/importccl/read_import_pgdump.go | 2 +- pkg/ccl/partitionccl/drop_test.go | 4 +- pkg/config/system_test.go | 16 ++- pkg/jobs/registry.go | 3 +- pkg/jobs/registry_test.go | 13 +- pkg/kv/kvserver/client_split_test.go | 10 +- pkg/kv/kvserver/client_test.go | 8 +- .../reports/constraint_stats_report_test.go | 4 +- pkg/kv/kvserver/reports/reporter.go | 2 +- pkg/kv/kvserver/store_test.go | 8 +- pkg/server/node.go | 8 +- pkg/server/node_test.go | 4 +- pkg/server/server_sql.go | 3 + pkg/server/server_test.go | 2 +- pkg/server/testserver.go | 4 +- pkg/sql/alter_table.go | 16 +-- pkg/sql/authorization.go | 3 +- pkg/sql/backfill.go | 16 +-- pkg/sql/check.go | 4 +- pkg/sql/colflow/colbatch_scan_test.go | 2 +- pkg/sql/conn_executor.go | 3 +- pkg/sql/crdb_internal.go | 4 +- pkg/sql/crdb_internal_test.go | 12 +- pkg/sql/create_index.go | 2 +- pkg/sql/create_sequence.go | 4 +- pkg/sql/create_table.go | 22 +-- pkg/sql/create_test.go | 14 +- pkg/sql/create_view.go | 6 +- pkg/sql/database.go | 39 +++--- pkg/sql/database_test.go | 7 +- pkg/sql/delete_test.go | 5 +- pkg/sql/descriptor.go | 68 +++++---- pkg/sql/descriptor_mutation_test.go | 30 ++-- pkg/sql/distsql_physical_planner_test.go | 3 +- pkg/sql/distsql_plan_backfill_test.go | 3 +- pkg/sql/distsql_plan_join_test.go | 4 +- pkg/sql/drop_database.go | 7 +- pkg/sql/drop_index.go | 2 +- pkg/sql/drop_test.go | 56 ++++---- pkg/sql/exec_util.go | 2 +- pkg/sql/flowinfra/cluster_test.go | 2 +- pkg/sql/flowinfra/server_test.go | 2 +- pkg/sql/gcjob/descriptor_utils.go | 7 +- pkg/sql/gcjob/index_garbage_collection.go | 2 +- pkg/sql/gcjob/refresh_statuses.go | 2 +- pkg/sql/gcjob/table_garbage_collection.go | 4 +- pkg/sql/gcjob_test/gc_job_test.go | 12 +- pkg/sql/grant_revoke.go | 10 +- pkg/sql/information_schema.go | 4 +- pkg/sql/join_test.go | 2 +- pkg/sql/lease.go | 23 +-- pkg/sql/lease_internal_test.go | 20 +-- pkg/sql/lease_test.go | 43 +++--- pkg/sql/logical_schema_accessors.go | 11 +- pkg/sql/namespace_test.go | 10 +- pkg/sql/old_foreign_key_desc_test.go | 13 +- pkg/sql/opt_catalog.go | 12 +- pkg/sql/partition_test.go | 5 +- pkg/sql/pg_catalog.go | 2 +- pkg/sql/pgwire_internal_test.go | 3 +- pkg/sql/physical_schema_accessors.go | 42 ++++-- pkg/sql/physicalplan/aggregator_funcs_test.go | 3 +- .../physicalplan/fake_span_resolver_test.go | 2 +- pkg/sql/physicalplan/span_resolver_test.go | 3 +- pkg/sql/privileged_accessor.go | 2 +- pkg/sql/privileged_accessor_test.go | 3 +- pkg/sql/rename_column.go | 2 +- pkg/sql/rename_database.go | 13 +- pkg/sql/rename_index.go | 2 +- pkg/sql/rename_table.go | 10 +- pkg/sql/rename_test.go | 10 +- pkg/sql/resolver.go | 28 ++-- pkg/sql/revert_test.go | 7 +- pkg/sql/row/cascader.go | 8 +- pkg/sql/row/fetcher.go | 2 +- pkg/sql/row/fetcher_mvcc_test.go | 4 +- pkg/sql/row/fetcher_test.go | 12 +- pkg/sql/rowexec/backfiller.go | 13 +- pkg/sql/rowexec/backfiller_test.go | 9 +- .../rowexec/index_skip_table_reader_test.go | 18 +-- pkg/sql/rowexec/indexjoiner_test.go | 5 +- .../rowexec/interleaved_reader_joiner_test.go | 18 +-- pkg/sql/rowexec/joinreader_test.go | 13 +- 
pkg/sql/rowexec/tablereader_test.go | 8 +- pkg/sql/rowexec/zigzagjoiner_test.go | 25 ++-- pkg/sql/scatter_test.go | 2 +- pkg/sql/schema/schema.go | 10 +- pkg/sql/schema_accessors.go | 9 +- pkg/sql/schema_change_migrations_test.go | 25 ++-- pkg/sql/schema_changer.go | 15 +- pkg/sql/schema_changer_test.go | 131 +++++++++--------- pkg/sql/scrub.go | 6 +- pkg/sql/scrub_test.go | 20 +-- pkg/sql/sem/builtins/builtins.go | 6 +- pkg/sql/set_zone_config.go | 2 +- pkg/sql/span_builder_test.go | 3 +- pkg/sql/sqlbase/keys.go | 24 ++-- pkg/sql/sqlbase/keys_test.go | 20 ++- pkg/sql/sqlbase/metadata.go | 24 ++-- pkg/sql/sqlbase/namespace.go | 49 ++++--- pkg/sql/sqlbase/structured.go | 74 +++++----- pkg/sql/sqlbase/structured_test.go | 8 +- pkg/sql/sqlbase/system.go | 25 ++-- pkg/sql/sqlbase/table.go | 9 +- pkg/sql/sqlbase/table_test.go | 12 +- pkg/sql/sqlbase/testutils.go | 16 ++- pkg/sql/sqlbase/utils_test.go | 2 +- pkg/sql/stats/automatic_stats_test.go | 7 +- pkg/sql/stats/gossip_invalidation_test.go | 3 +- pkg/sql/table.go | 36 +++-- pkg/sql/table_ref_test.go | 5 +- pkg/sql/temporary_schema.go | 31 +++-- pkg/sql/temporary_schema_test.go | 4 +- pkg/sql/tests/hash_sharded_test.go | 7 +- pkg/sql/tests/system_table_test.go | 2 +- pkg/sql/truncate.go | 4 +- pkg/sql/unsplit.go | 4 +- pkg/sql/vars.go | 5 +- pkg/sql/virtual_schema.go | 2 +- pkg/sql/zone_config.go | 12 +- pkg/sql/zone_config_test.go | 2 +- pkg/sqlmigrations/migrations.go | 54 +++++--- pkg/sqlmigrations/migrations_test.go | 9 +- .../localtestcluster/local_test_cluster.go | 5 +- pkg/testutils/testcluster/testcluster_test.go | 2 +- 136 files changed, 966 insertions(+), 713 deletions(-) diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index 89c1f220b6b8..54b05e31cec7 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -1042,7 +1042,7 @@ func TestBackupRestoreResume(t *testing.T) { _, tc, outerDB, dir, cleanupFn := backupRestoreTestSetup(t, multiNode, numAccounts, initNone) defer cleanupFn() - backupTableDesc := sqlbase.GetTableDescriptor(tc.Servers[0].DB(), "data", "bank") + backupTableDesc := sqlbase.GetTableDescriptor(tc.Servers[0].DB(), keys.SystemSQLCodec, "data", "bank") t.Run("backup", func(t *testing.T) { sqlDB := sqlutils.MakeSQLRunner(outerDB.DB) diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index 45aad5d20e8c..eba00080da97 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -448,14 +448,14 @@ func WriteTableDescs( desc.Privileges = sqlbase.NewDefaultPrivilegeDescriptor() } wroteDBs[desc.ID] = desc - if err := sql.WriteNewDescToBatch(ctx, false /* kvTrace */, settings, b, desc.ID, desc); err != nil { + if err := sql.WriteNewDescToBatch(ctx, false /* kvTrace */, settings, b, keys.SystemSQLCodec, desc.ID, desc); err != nil { return err } // Depending on which cluster version we are restoring to, we decide which // namespace table to write the descriptor into. This may cause wrong // behavior if the cluster version is bumped DURING a restore. dKey := sqlbase.MakeDatabaseNameKey(ctx, settings, desc.Name) - b.CPut(dKey.Key(), desc.ID, nil) + b.CPut(dKey.Key(keys.SystemSQLCodec), desc.ID, nil) } for i := range tables { // For full cluster restore, keep privileges as they were. 
@@ -466,7 +466,7 @@ func WriteTableDescs( tables[i].Privileges = wrote.GetPrivileges() } } else { - parentDB, err := sqlbase.GetDatabaseDescFromID(ctx, txn, tables[i].ParentID) + parentDB, err := sqlbase.GetDatabaseDescFromID(ctx, txn, keys.SystemSQLCodec, tables[i].ParentID) if err != nil { return errors.Wrapf(err, "failed to lookup parent DB %d", errors.Safe(tables[i].ParentID)) @@ -480,14 +480,14 @@ func WriteTableDescs( tables[i].Privileges = parentDB.GetPrivileges() } } - if err := sql.WriteNewDescToBatch(ctx, false /* kvTrace */, settings, b, tables[i].ID, tables[i]); err != nil { + if err := sql.WriteNewDescToBatch(ctx, false /* kvTrace */, settings, b, keys.SystemSQLCodec, tables[i].ID, tables[i]); err != nil { return err } // Depending on which cluster version we are restoring to, we decide which // namespace table to write the descriptor into. This may cause wrong // behavior if the cluster version is bumped DURING a restore. tkey := sqlbase.MakePublicTableNameKey(ctx, settings, tables[i].ParentID, tables[i].Name) - b.CPut(tkey.Key(), tables[i].ID, nil) + b.CPut(tkey.Key(keys.SystemSQLCodec), tables[i].ID, nil) } for _, kv := range extra { b.InitPut(kv.Key, &kv.Value, false) @@ -500,7 +500,7 @@ func WriteTableDescs( } for _, table := range tables { - if err := table.Validate(ctx, txn); err != nil { + if err := table.Validate(ctx, txn, keys.SystemSQLCodec); err != nil { return errors.Wrapf(err, "validate table %d", errors.Safe(table.ID)) } @@ -757,11 +757,11 @@ func restore( // returned an error prior to this. func loadBackupSQLDescs( ctx context.Context, + p sql.PlanHookState, details jobspb.RestoreDetails, - makeExternalStorageFromURI cloud.ExternalStorageFromURIFactory, encryption *roachpb.FileEncryptionOptions, ) ([]BackupManifest, BackupManifest, []sqlbase.Descriptor, error) { - backupManifests, err := loadBackupManifests(ctx, details.URIs, makeExternalStorageFromURI, encryption) + backupManifests, err := loadBackupManifests(ctx, details.URIs, p.ExecCfg().DistSQLSrv.ExternalStorageFromURI, encryption) if err != nil { return nil, BackupManifest{}, nil, err } @@ -770,7 +770,7 @@ func loadBackupSQLDescs( // TODO(lucy, jordan): This should become unnecessary in 20.1 when we stop // writing old-style descs in RestoreDetails (unless a job persists across // an upgrade?). 
- if err := maybeUpgradeTableDescsInBackupManifests(ctx, backupManifests, true /* skipFKsWithNoMatchingTable */); err != nil { + if err := maybeUpgradeTableDescsInBackupManifests(ctx, backupManifests, p.ExecCfg().Codec, true /* skipFKsWithNoMatchingTable */); err != nil { return nil, BackupManifest{}, nil, err } @@ -951,7 +951,7 @@ func (r *restoreResumer) Resume( p := phs.(sql.PlanHookState) backupManifests, latestBackupManifest, sqlDescs, err := loadBackupSQLDescs( - ctx, details, p.ExecCfg().DistSQLSrv.ExternalStorageFromURI, details.Encryption, + ctx, p, details, details.Encryption, ) if err != nil { return err @@ -1083,12 +1083,12 @@ func (r *restoreResumer) publishTables(ctx context.Context) error { tableDesc := *tbl tableDesc.Version++ tableDesc.State = sqlbase.TableDescriptor_PUBLIC - existingDescVal, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, tbl) + existingDescVal, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, r.execCfg.Codec, tbl) if err != nil { return errors.Wrap(err, "validating table descriptor has not changed") } b.CPut( - sqlbase.MakeDescMetadataKey(tableDesc.ID), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), sqlbase.WrapDescriptor(&tableDesc), existingDescVal, ) @@ -1158,16 +1158,16 @@ func (r *restoreResumer) dropTables(ctx context.Context, jr *jobs.Registry, txn tableDesc := *tbl tableDesc.Version++ tableDesc.State = sqlbase.TableDescriptor_DROP - err := sqlbase.RemovePublicTableNamespaceEntry(ctx, txn, tbl.ParentID, tbl.Name) + err := sqlbase.RemovePublicTableNamespaceEntry(ctx, txn, keys.SystemSQLCodec, tbl.ParentID, tbl.Name) if err != nil { return errors.Wrap(err, "dropping tables caused by restore fail/cancel from public namespace") } - existingDescVal, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, tbl) + existingDescVal, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, r.execCfg.Codec, tbl) if err != nil { return errors.Wrap(err, "dropping tables caused by restore fail/cancel") } b.CPut( - sqlbase.MakeDescMetadataKey(tableDesc.ID), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), sqlbase.WrapDescriptor(&tableDesc), existingDescVal, ) @@ -1213,9 +1213,9 @@ func (r *restoreResumer) dropTables(ctx context.Context, jr *jobs.Registry, txn } if isDBEmpty { - descKey := sqlbase.MakeDescMetadataKey(dbDesc.ID) + descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, dbDesc.ID) b.Del(descKey) - b.Del(sqlbase.NewDatabaseKey(dbDesc.Name).Key()) + b.Del(sqlbase.NewDatabaseKey(dbDesc.Name).Key(keys.SystemSQLCodec)) } } if err := txn.Run(ctx, b); err != nil { diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go index 16468d88549c..16df9430d4fa 100644 --- a/pkg/ccl/backupccl/restore_planning.go +++ b/pkg/ccl/backupccl/restore_planning.go @@ -224,7 +224,7 @@ func allocateTableRewrites( maxExpectedDB := keys.MinUserDescID + sql.MaxDefaultDescriptorID // Check that any DBs being restored do _not_ exist. for name := range restoreDBNames { - found, foundID, err := sqlbase.LookupDatabaseID(ctx, txn, name) + found, foundID, err := sqlbase.LookupDatabaseID(ctx, txn, p.ExecCfg().Codec, name) if err != nil { return err } @@ -242,11 +242,11 @@ func allocateTableRewrites( } else if descriptorCoverage == tree.AllDescriptors && table.ParentID < sql.MaxDefaultDescriptorID { // This is a table that is in a database that already existed at // cluster creation time. 
- defaultDBID, err := lookupDatabaseID(ctx, txn, sessiondata.DefaultDatabaseName) + defaultDBID, err := lookupDatabaseID(ctx, txn, p.ExecCfg().Codec, sessiondata.DefaultDatabaseName) if err != nil { return err } - postgresDBID, err := lookupDatabaseID(ctx, txn, sessiondata.PgDatabaseName) + postgresDBID, err := lookupDatabaseID(ctx, txn, p.ExecCfg().Codec, sessiondata.PgDatabaseName) if err != nil { return err } @@ -281,7 +281,7 @@ func allocateTableRewrites( } else { var parentID sqlbase.ID { - found, newParentID, err := sqlbase.LookupDatabaseID(ctx, txn, targetDB) + found, newParentID, err := sqlbase.LookupDatabaseID(ctx, txn, p.ExecCfg().Codec, targetDB) if err != nil { return err } @@ -293,13 +293,13 @@ func allocateTableRewrites( } // Check that the table name is _not_ in use. // This would fail the CPut later anyway, but this yields a prettier error. - if err := CheckTableExists(ctx, txn, parentID, table.Name); err != nil { + if err := CheckTableExists(ctx, txn, p.ExecCfg().Codec, parentID, table.Name); err != nil { return err } // Check privileges. { - parentDB, err := sqlbase.GetDatabaseDescFromID(ctx, txn, parentID) + parentDB, err := sqlbase.GetDatabaseDescFromID(ctx, txn, p.ExecCfg().Codec, parentID) if err != nil { return errors.Wrapf(err, "failed to lookup parent DB %d", errors.Safe(parentID)) @@ -390,7 +390,10 @@ func allocateTableRewrites( // "other" table is missing from the set provided are omitted during the // upgrade, instead of causing an error to be returned. func maybeUpgradeTableDescsInBackupManifests( - ctx context.Context, backupManifests []BackupManifest, skipFKsWithNoMatchingTable bool, + ctx context.Context, + backupManifests []BackupManifest, + codec keys.SQLCodec, + skipFKsWithNoMatchingTable bool, ) error { protoGetter := sqlbase.MapProtoGetter{ Protos: make(map[interface{}]protoutil.Message), @@ -400,7 +403,7 @@ func maybeUpgradeTableDescsInBackupManifests( for _, backupManifest := range backupManifests { for _, desc := range backupManifest.Descriptors { if table := desc.Table(hlc.Timestamp{}); table != nil { - protoGetter.Protos[string(sqlbase.MakeDescMetadataKey(table.ID))] = + protoGetter.Protos[string(sqlbase.MakeDescMetadataKey(codec, table.ID))] = sqlbase.WrapDescriptor(protoutil.Clone(table).(*sqlbase.TableDescriptor)) } } @@ -410,7 +413,7 @@ func maybeUpgradeTableDescsInBackupManifests( backupManifest := &backupManifests[i] for j := range backupManifest.Descriptors { if table := backupManifest.Descriptors[j].Table(hlc.Timestamp{}); table != nil { - if _, err := table.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, skipFKsWithNoMatchingTable); err != nil { + if _, err := table.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, codec, skipFKsWithNoMatchingTable); err != nil { return err } // TODO(lucy): Is this necessary? @@ -713,7 +716,7 @@ func doRestorePlan( // Ensure that no user table descriptors exist for a full cluster restore. 
txn := p.ExecCfg().DB.NewTxn(ctx, "count-user-descs") - descCount, err := sql.CountUserDescriptors(ctx, txn) + descCount, err := sql.CountUserDescriptors(ctx, txn, p.ExecCfg().Codec) if err != nil { return errors.Wrap(err, "looking up user descriptors during restore") } @@ -725,7 +728,7 @@ func doRestorePlan( } _, skipMissingFKs := opts[restoreOptSkipMissingFKs] - if err := maybeUpgradeTableDescsInBackupManifests(ctx, mainBackupManifests, skipMissingFKs); err != nil { + if err := maybeUpgradeTableDescsInBackupManifests(ctx, mainBackupManifests, p.ExecCfg().Codec, skipMissingFKs); err != nil { return err } diff --git a/pkg/ccl/backupccl/show.go b/pkg/ccl/backupccl/show.go index 66a771900f4d..97988d98c555 100644 --- a/pkg/ccl/backupccl/show.go +++ b/pkg/ccl/backupccl/show.go @@ -135,7 +135,7 @@ func showBackupPlanHook( // display them anyway, because we don't have the referenced table names, // etc. if err := maybeUpgradeTableDescsInBackupManifests( - ctx, manifests, true, /*skipFKsWithNoMatchingTable*/ + ctx, manifests, p.ExecCfg().Codec, true, /*skipFKsWithNoMatchingTable*/ ); err != nil { return err } diff --git a/pkg/ccl/backupccl/show_test.go b/pkg/ccl/backupccl/show_test.go index 221e6051943a..17e72b11fc87 100644 --- a/pkg/ccl/backupccl/show_test.go +++ b/pkg/ccl/backupccl/show_test.go @@ -99,8 +99,8 @@ func TestShowBackup(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE data.details2()`) sqlDB.Exec(t, `BACKUP data.details1, data.details2 TO $1;`, details) - details1Desc := sqlbase.GetTableDescriptor(tc.Server(0).DB(), "data", "details1") - details2Desc := sqlbase.GetTableDescriptor(tc.Server(0).DB(), "data", "details2") + details1Desc := sqlbase.GetTableDescriptor(tc.Server(0).DB(), keys.SystemSQLCodec, "data", "details1") + details2Desc := sqlbase.GetTableDescriptor(tc.Server(0).DB(), keys.SystemSQLCodec, "data", "details2") details1Key := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, details1Desc, details1Desc.PrimaryIndex.ID)) details2Key := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, details2Desc, details2Desc.PrimaryIndex.ID)) diff --git a/pkg/ccl/backupccl/targets.go b/pkg/ccl/backupccl/targets.go index 76bd5de8b297..d56a01356d08 100644 --- a/pkg/ccl/backupccl/targets.go +++ b/pkg/ccl/backupccl/targets.go @@ -598,8 +598,10 @@ func fullClusterTargets( return fullClusterDescs, fullClusterDBs, nil } -func lookupDatabaseID(ctx context.Context, txn *kv.Txn, name string) (sqlbase.ID, error) { - found, id, err := sqlbase.LookupDatabaseID(ctx, txn, name) +func lookupDatabaseID( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, name string, +) (sqlbase.ID, error) { + found, id, err := sqlbase.LookupDatabaseID(ctx, txn, codec, name) if err != nil { return sqlbase.InvalidID, err } @@ -611,8 +613,10 @@ func lookupDatabaseID(ctx context.Context, txn *kv.Txn, name string) (sqlbase.ID // CheckTableExists returns an error if a table already exists with given // parent and name. 
-func CheckTableExists(ctx context.Context, txn *kv.Txn, parentID sqlbase.ID, name string) error { - found, _, err := sqlbase.LookupPublicTableID(ctx, txn, parentID, name) +func CheckTableExists( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, parentID sqlbase.ID, name string, +) error { + found, _, err := sqlbase.LookupPublicTableID(ctx, txn, codec, parentID, name) if err != nil { return err } diff --git a/pkg/ccl/changefeedccl/bench_test.go b/pkg/ccl/changefeedccl/bench_test.go index 24f821b1826f..b9e564fcb264 100644 --- a/pkg/ccl/changefeedccl/bench_test.go +++ b/pkg/ccl/changefeedccl/bench_test.go @@ -184,7 +184,7 @@ func createBenchmarkChangefeed( feedClock *hlc.Clock, database, table string, ) (*benchSink, func() error, error) { - tableDesc := sqlbase.GetTableDescriptor(s.DB(), database, table) + tableDesc := sqlbase.GetTableDescriptor(s.DB(), keys.SystemSQLCodec, database, table) spans := []roachpb.Span{tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec)} details := jobspb.ChangefeedDetails{ Targets: jobspb.ChangefeedTargets{tableDesc.ID: jobspb.ChangefeedTarget{ diff --git a/pkg/ccl/changefeedccl/changefeed_dist.go b/pkg/ccl/changefeedccl/changefeed_dist.go index d86bf7a5c851..30114069c32f 100644 --- a/pkg/ccl/changefeedccl/changefeed_dist.go +++ b/pkg/ccl/changefeedccl/changefeed_dist.go @@ -225,7 +225,7 @@ func fetchSpansForTargets( txn.SetFixedTimestamp(ctx, ts) // Note that all targets are currently guaranteed to be tables. for tableID := range targets { - tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, tableID) + tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, codec, tableID) if err != nil { return err } diff --git a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go index 2c0768aa5c61..e3a6b72c7012 100644 --- a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go +++ b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go @@ -190,7 +190,7 @@ func (tf *SchemaFeed) primeInitialTableDescs(ctx context.Context) error { txn.SetFixedTimestamp(ctx, initialTableDescTs) // Note that all targets are currently guaranteed to be tables. 
for tableID := range tf.targets { - tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, tableID) + tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, keys.SystemSQLCodec, tableID) if err != nil { return err } diff --git a/pkg/ccl/importccl/import_stmt.go b/pkg/ccl/importccl/import_stmt.go index 7d1309c0e3b2..ebd3939838d3 100644 --- a/pkg/ccl/importccl/import_stmt.go +++ b/pkg/ccl/importccl/import_stmt.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -857,7 +858,7 @@ func prepareNewTableDescsForIngestion( ) ([]*sqlbase.TableDescriptor, error) { var tableDescs []*sqlbase.TableDescriptor for _, i := range tables { - if err := backupccl.CheckTableExists(ctx, txn, parentID, i.Desc.Name); err != nil { + if err := backupccl.CheckTableExists(ctx, txn, p.ExecCfg().Codec, parentID, i.Desc.Name); err != nil { return nil, err } tableDescs = append(tableDescs, i.Desc) @@ -894,7 +895,7 @@ func prepareNewTableDescsForIngestion( var seqValKVs []roachpb.KeyValue for i := range tableDescs { if v, ok := seqVals[tableDescs[i].ID]; ok && v != 0 { - key, val, err := sql.MakeSequenceKeyVal(tableDescs[i], v, false) + key, val, err := sql.MakeSequenceKeyVal(p.ExecCfg().Codec, tableDescs[i], v, false) if err != nil { return nil, err } @@ -916,7 +917,7 @@ func prepareNewTableDescsForIngestion( // Prepares descriptors for existing tables being imported into. func prepareExistingTableDescForIngestion( - ctx context.Context, txn *kv.Txn, desc *sqlbase.TableDescriptor, p sql.PlanHookState, + ctx context.Context, txn *kv.Txn, execCfg *sql.ExecutorConfig, desc *sqlbase.TableDescriptor, ) (*sqlbase.TableDescriptor, error) { if len(desc.Mutations) > 0 { return nil, errors.Errorf("cannot IMPORT INTO a table with schema changes in progress -- try again later (pending mutation %s)", desc.Mutations[0].String()) @@ -940,12 +941,12 @@ func prepareExistingTableDescForIngestion( // upgrade and downgrade, because IMPORT does not operate in mixed-version // states. // TODO(jordan,lucy): remove this comment once 19.2 is released. - existingDesc, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, desc) + existingDesc, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, execCfg.Codec, desc) if err != nil { return nil, errors.Wrap(err, "another operation is currently operating on the table") } err = txn.CPut(ctx, - sqlbase.MakeDescMetadataKey(desc.ID), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.ID), sqlbase.WrapDescriptor(&importing), existingDesc) if err != nil { @@ -978,7 +979,7 @@ func (r *importResumer) prepareTableDescsForIngestion( var desc *sqlbase.TableDescriptor for i, table := range details.Tables { if !table.IsNew { - desc, err = prepareExistingTableDescForIngestion(ctx, txn, table.Desc, p) + desc, err = prepareExistingTableDescForIngestion(ctx, txn, p.ExecCfg(), table.Desc) if err != nil { return err } @@ -1216,12 +1217,12 @@ func (r *importResumer) publishTables(ctx context.Context, execCfg *sql.Executor // upgrade and downgrade, because IMPORT does not operate in mixed-version // states. // TODO(jordan,lucy): remove this comment once 19.2 is released. 
- existingDesc, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, tbl.Desc) + existingDesc, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, execCfg.Codec, tbl.Desc) if err != nil { return errors.Wrap(err, "publishing tables") } b.CPut( - sqlbase.MakeDescMetadataKey(tableDesc.ID), + sqlbase.MakeDescMetadataKey(execCfg.Codec, tableDesc.ID), sqlbase.WrapDescriptor(&tableDesc), existingDesc) } @@ -1347,7 +1348,7 @@ func (r *importResumer) dropTables( // possible. This is safe since the table data was never visible to users, // and so we don't need to preserve MVCC semantics. tableDesc.DropTime = dropTime - if err := sqlbase.RemovePublicTableNamespaceEntry(ctx, txn, tableDesc.ParentID, tableDesc.Name); err != nil { + if err := sqlbase.RemovePublicTableNamespaceEntry(ctx, txn, execCfg.Codec, tableDesc.ParentID, tableDesc.Name); err != nil { return err } } else { @@ -1358,12 +1359,12 @@ func (r *importResumer) dropTables( // upgrade and downgrade, because IMPORT does not operate in mixed-version // states. // TODO(jordan,lucy): remove this comment once 19.2 is released. - existingDesc, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, tbl.Desc) + existingDesc, err := sqlbase.ConditionalGetTableDescFromTxn(ctx, txn, execCfg.Codec, tbl.Desc) if err != nil { return errors.Wrap(err, "rolling back tables") } b.CPut( - sqlbase.MakeDescMetadataKey(tableDesc.ID), + sqlbase.MakeDescMetadataKey(execCfg.Codec, tableDesc.ID), sqlbase.WrapDescriptor(&tableDesc), existingDesc) } diff --git a/pkg/ccl/importccl/load_test.go b/pkg/ccl/importccl/load_test.go index 5471c15655b3..9fb0f96761b4 100644 --- a/pkg/ccl/importccl/load_test.go +++ b/pkg/ccl/importccl/load_test.go @@ -18,6 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/importccl" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -61,11 +62,11 @@ func TestGetDescriptorFromDB(t *testing.T) { return err } batch := txn.NewBatch() - batch.Put(sqlbase.NewDatabaseKey("bob").Key(), 9999) - batch.Put(sqlbase.NewDeprecatedDatabaseKey("alice").Key(), 10000) + batch.Put(sqlbase.NewDatabaseKey("bob").Key(keys.SystemSQLCodec), 9999) + batch.Put(sqlbase.NewDeprecatedDatabaseKey("alice").Key(keys.SystemSQLCodec), 10000) - batch.Put(sqlbase.MakeDescMetadataKey(9999), sqlbase.WrapDescriptor(bobDesc)) - batch.Put(sqlbase.MakeDescMetadataKey(10000), sqlbase.WrapDescriptor(aliceDesc)) + batch.Put(sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, 9999), sqlbase.WrapDescriptor(bobDesc)) + batch.Put(sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, 10000), sqlbase.WrapDescriptor(aliceDesc)) return txn.CommitInBatch(ctx, batch) }) require.NoError(t, err) diff --git a/pkg/ccl/importccl/read_import_pgdump.go b/pkg/ccl/importccl/read_import_pgdump.go index 3bb6f9d12171..e7cb26f475b9 100644 --- a/pkg/ccl/importccl/read_import_pgdump.go +++ b/pkg/ccl/importccl/read_import_pgdump.go @@ -635,7 +635,7 @@ func (m *pgDumpReader) readFile( if seq == nil { break } - key, val, err := sql.MakeSequenceKeyVal(seq.Desc, val, isCalled) + key, val, err := sql.MakeSequenceKeyVal(keys.TODOSQLCodec, seq.Desc, val, isCalled) if err != nil { return wrapRowErr(err, "", count, pgcode.Uncategorized, "") } diff --git a/pkg/ccl/partitionccl/drop_test.go b/pkg/ccl/partitionccl/drop_test.go index 0ba08e9d1711..a8757cc99da5 100644 --- a/pkg/ccl/partitionccl/drop_test.go +++ 
b/pkg/ccl/partitionccl/drop_test.go @@ -67,7 +67,7 @@ func TestDropIndexWithZoneConfigCCL(t *testing.T) { PARTITION p1 VALUES IN (1), PARTITION p2 VALUES IN (2) )`) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") indexDesc, _, err := tableDesc.FindIndexByName("i") if err != nil { t.Fatal(err) @@ -114,7 +114,7 @@ func TestDropIndexWithZoneConfigCCL(t *testing.T) { t.Fatalf(`zone config for %s still exists`, target) } } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") if _, _, err := tableDesc.FindIndexByName("i"); err == nil { t.Fatalf("table descriptor still contains index after index is dropped") } diff --git a/pkg/config/system_test.go b/pkg/config/system_test.go index 94927f2e4121..decf8c753937 100644 --- a/pkg/config/system_test.go +++ b/pkg/config/system_test.go @@ -49,7 +49,7 @@ func sqlKV(tableID uint32, indexID, descriptorID uint64) roachpb.KeyValue { } func descriptor(descriptorID uint64) roachpb.KeyValue { - return kv(sqlbase.MakeDescMetadataKey(sqlbase.ID(descriptorID)), nil) + return kv(sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(descriptorID)), nil) } func zoneConfig(descriptorID uint32, spans ...zonepb.SubzoneSpan) roachpb.KeyValue { @@ -162,7 +162,7 @@ func TestGetLargestID(t *testing.T) { // Real SQL layout. func() testCase { - ms := sqlbase.MakeMetadataSchema(zonepb.DefaultZoneConfigRef(), zonepb.DefaultSystemZoneConfigRef()) + ms := sqlbase.MakeMetadataSchema(keys.SystemSQLCodec, zonepb.DefaultZoneConfigRef(), zonepb.DefaultSystemZoneConfigRef()) descIDs := ms.DescriptorIDs() maxDescID := descIDs[len(descIDs)-1] kvs, _ /* splits */ := ms.GetInitialValues(clusterversion.TestingClusterVersion) @@ -258,7 +258,9 @@ func TestComputeSplitKeySystemRanges(t *testing.T) { } cfg := config.NewSystemConfig(zonepb.DefaultZoneConfigRef()) - kvs, _ /* splits */ := sqlbase.MakeMetadataSchema(cfg.DefaultZoneConfig, zonepb.DefaultSystemZoneConfigRef()).GetInitialValues(clusterversion.TestingClusterVersion) + kvs, _ /* splits */ := sqlbase.MakeMetadataSchema( + keys.SystemSQLCodec, cfg.DefaultZoneConfig, zonepb.DefaultSystemZoneConfigRef(), + ).GetInitialValues(clusterversion.TestingClusterVersion) cfg.SystemConfigEntries = config.SystemConfigEntries{ Values: kvs, } @@ -288,7 +290,9 @@ func TestComputeSplitKeyTableIDs(t *testing.T) { // separately above. minKey := roachpb.RKey(keys.TimeseriesPrefix.PrefixEnd()) - schema := sqlbase.MakeMetadataSchema(zonepb.DefaultZoneConfigRef(), zonepb.DefaultSystemZoneConfigRef()) + schema := sqlbase.MakeMetadataSchema( + keys.SystemSQLCodec, zonepb.DefaultZoneConfigRef(), zonepb.DefaultSystemZoneConfigRef(), + ) // Real system tables only. baseSql, _ /* splits */ := schema.GetInitialValues(clusterversion.TestingClusterVersion) // Real system tables plus some user stuff. 
@@ -434,7 +438,9 @@ func TestGetZoneConfigForKey(t *testing.T) { }() cfg := config.NewSystemConfig(zonepb.DefaultZoneConfigRef()) - kvs, _ /* splits */ := sqlbase.MakeMetadataSchema(cfg.DefaultZoneConfig, zonepb.DefaultSystemZoneConfigRef()).GetInitialValues(clusterversion.TestingClusterVersion) + kvs, _ /* splits */ := sqlbase.MakeMetadataSchema( + keys.SystemSQLCodec, cfg.DefaultZoneConfig, zonepb.DefaultSystemZoneConfigRef(), + ).GetInitialValues(clusterversion.TestingClusterVersion) cfg.SystemConfigEntries = config.SystemConfigEntries{ Values: kvs, } diff --git a/pkg/jobs/registry.go b/pkg/jobs/registry.go index 7da7139072c0..993e51144a0e 100644 --- a/pkg/jobs/registry.go +++ b/pkg/jobs/registry.go @@ -21,6 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagepb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -564,7 +565,7 @@ func (r *Registry) isOrphaned(ctx context.Context, payload *jobspb.Payload) (boo for _, id := range payload.DescriptorIDs { pendingMutations := false if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - td, err := sqlbase.GetTableDescFromID(ctx, txn, id) + td, err := sqlbase.GetTableDescFromID(ctx, txn, keys.TODOSQLCodec, id) if err != nil { return err } diff --git a/pkg/jobs/registry_test.go b/pkg/jobs/registry_test.go index 85fb50e168ad..524dd35c1f4a 100644 --- a/pkg/jobs/registry_test.go +++ b/pkg/jobs/registry_test.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -207,11 +208,11 @@ func TestRegistryGC(t *testing.T) { muchEarlier := ts.Add(-2 * time.Hour) setMutations := func(mutations []sqlbase.DescriptorMutation) sqlbase.ID { - desc := sqlbase.GetTableDescriptor(kvDB, "t", "to_be_mutated") + desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "to_be_mutated") desc.Mutations = mutations if err := kvDB.Put( context.TODO(), - sqlbase.MakeDescMetadataKey(desc.GetID()), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()), sqlbase.WrapDescriptor(desc), ); err != nil { t.Fatal(err) @@ -220,11 +221,11 @@ func TestRegistryGC(t *testing.T) { } setGCMutations := func(gcMutations []sqlbase.TableDescriptor_GCDescriptorMutation) sqlbase.ID { - desc := sqlbase.GetTableDescriptor(kvDB, "t", "to_be_mutated") + desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "to_be_mutated") desc.GCMutations = gcMutations if err := kvDB.Put( context.TODO(), - sqlbase.MakeDescMetadataKey(desc.GetID()), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()), sqlbase.WrapDescriptor(desc), ); err != nil { t.Fatal(err) @@ -233,7 +234,7 @@ func TestRegistryGC(t *testing.T) { } setDropJob := func(shouldDrop bool) sqlbase.ID { - desc := sqlbase.GetTableDescriptor(kvDB, "t", "to_be_mutated") + desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "to_be_mutated") if shouldDrop { desc.DropJobID = 123 } else { @@ -242,7 +243,7 @@ func TestRegistryGC(t *testing.T) { } if err := kvDB.Put( context.TODO(), - sqlbase.MakeDescMetadataKey(desc.GetID()), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()), sqlbase.WrapDescriptor(desc), ); err 
!= nil { t.Fatal(err) diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index 0e1ff0888053..c3bbec694f17 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -263,7 +263,7 @@ func TestStoreRangeSplitAtTablePrefix(t *testing.T) { return err } // We don't care about the values, just the keys. - k := sqlbase.MakeDescMetadataKey(sqlbase.ID(keys.MinUserDescID)) + k := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(keys.MinUserDescID)) return txn.Put(ctx, k, &desc) }); err != nil { t.Fatal(err) @@ -1279,7 +1279,9 @@ func TestStoreRangeSystemSplits(t *testing.T) { userTableMax := keys.MinUserDescID + 4 var exceptions map[int]struct{} - schema := sqlbase.MakeMetadataSchema(zonepb.DefaultZoneConfigRef(), zonepb.DefaultSystemZoneConfigRef()) + schema := sqlbase.MakeMetadataSchema( + keys.SystemSQLCodec, zonepb.DefaultZoneConfigRef(), zonepb.DefaultSystemZoneConfigRef(), + ) // Write table descriptors for the tables in the metadata schema as well as // five dummy user tables. This does two things: // - descriptor IDs are used to determine split keys @@ -1301,7 +1303,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { } for i := keys.MinUserDescID; i <= userTableMax; i++ { // We don't care about the value, just the key. - key := sqlbase.MakeDescMetadataKey(sqlbase.ID(i)) + key := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(i)) if err := txn.Put(ctx, key, &sqlbase.Descriptor{}); err != nil { return err } @@ -1365,7 +1367,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { } // This time, only write the last table descriptor. Splits only occur for // the descriptor we add. We don't care about the value, just the key. - k := sqlbase.MakeDescMetadataKey(sqlbase.ID(userTableMax)) + k := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(userTableMax)) return txn.Put(ctx, k, &sqlbase.Descriptor{}) }); err != nil { t.Fatal(err) diff --git a/pkg/kv/kvserver/client_test.go b/pkg/kv/kvserver/client_test.go index 679093d65096..d7f48fd02fc1 100644 --- a/pkg/kv/kvserver/client_test.go +++ b/pkg/kv/kvserver/client_test.go @@ -187,7 +187,9 @@ func createTestStoreWithOpts( var kvs []roachpb.KeyValue var splits []roachpb.RKey bootstrapVersion := clusterversion.TestingClusterVersion - kvs, tableSplits := sqlbase.MakeMetadataSchema(storeCfg.DefaultZoneConfig, storeCfg.DefaultSystemZoneConfig).GetInitialValues(bootstrapVersion) + kvs, tableSplits := sqlbase.MakeMetadataSchema( + keys.SystemSQLCodec, storeCfg.DefaultZoneConfig, storeCfg.DefaultSystemZoneConfig, + ).GetInitialValues(bootstrapVersion) if !opts.dontCreateSystemRanges { splits = config.StaticSplits() splits = append(splits, tableSplits...) @@ -891,7 +893,9 @@ func (m *multiTestContext) addStore(idx int) { // Bootstrap the initial range on the first engine. var splits []roachpb.RKey bootstrapVersion := clusterversion.TestingClusterVersion - kvs, tableSplits := sqlbase.MakeMetadataSchema(cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig).GetInitialValues(bootstrapVersion) + kvs, tableSplits := sqlbase.MakeMetadataSchema( + keys.SystemSQLCodec, cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig, + ).GetInitialValues(bootstrapVersion) if !m.startWithSingleRange { splits = config.StaticSplits() splits = append(splits, tableSplits...) 
diff --git a/pkg/kv/kvserver/reports/constraint_stats_report_test.go b/pkg/kv/kvserver/reports/constraint_stats_report_test.go index 22d0893c9fd1..9ce2603c4969 100644 --- a/pkg/kv/kvserver/reports/constraint_stats_report_test.go +++ b/pkg/kv/kvserver/reports/constraint_stats_report_test.go @@ -1073,7 +1073,7 @@ func (b *systemConfigBuilder) addTableDesc(id int, tableDesc sqlbase.TableDescri panic(fmt.Sprintf("parent not set for table %q", tableDesc.Name)) } // Write the table to the SystemConfig, in the descriptors table. - k := sqlbase.MakeDescMetadataKey(sqlbase.ID(id)) + k := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(id)) desc := &sqlbase.Descriptor{ Union: &sqlbase.Descriptor_Table{ Table: &tableDesc, @@ -1092,7 +1092,7 @@ func (b *systemConfigBuilder) addTableDesc(id int, tableDesc sqlbase.TableDescri // addTableDesc adds a database descriptor to the SystemConfig. func (b *systemConfigBuilder) addDBDesc(id int, dbDesc sqlbase.DatabaseDescriptor) { // Write the table to the SystemConfig, in the descriptors table. - k := sqlbase.MakeDescMetadataKey(sqlbase.ID(id)) + k := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(id)) desc := &sqlbase.Descriptor{ Union: &sqlbase.Descriptor_Database{ Database: &dbDesc, diff --git a/pkg/kv/kvserver/reports/reporter.go b/pkg/kv/kvserver/reports/reporter.go index 23b8dc8fd2ab..edaa658aca4a 100644 --- a/pkg/kv/kvserver/reports/reporter.go +++ b/pkg/kv/kvserver/reports/reporter.go @@ -431,7 +431,7 @@ func visitAncestors( ) (bool, error) { // Check to see if it's a table. If so, inherit from the database. // For all other cases, inherit from the default. - descVal := cfg.GetValue(sqlbase.MakeDescMetadataKey(sqlbase.ID(id))) + descVal := cfg.GetValue(sqlbase.MakeDescMetadataKey(keys.TODOSQLCodec, sqlbase.ID(id))) if descVal == nil { // Couldn't find a descriptor. This is not expected to happen. // Let's just look at the default zone config. diff --git a/pkg/kv/kvserver/store_test.go b/pkg/kv/kvserver/store_test.go index 11a1781013e0..ce61577aae29 100644 --- a/pkg/kv/kvserver/store_test.go +++ b/pkg/kv/kvserver/store_test.go @@ -240,7 +240,9 @@ func createTestStoreWithoutStart( } var splits []roachpb.RKey bootstrapVersion := clusterversion.TestingClusterVersion - kvs, tableSplits := sqlbase.MakeMetadataSchema(cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig).GetInitialValues(bootstrapVersion) + kvs, tableSplits := sqlbase.MakeMetadataSchema( + keys.SystemSQLCodec, cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig, + ).GetInitialValues(bootstrapVersion) if opts.createSystemRanges { splits = config.StaticSplits() splits = append(splits, tableSplits...) @@ -455,7 +457,9 @@ func TestStoreInitAndBootstrap(t *testing.T) { // Bootstrap the system ranges. var splits []roachpb.RKey bootstrapVersion := clusterversion.TestingClusterVersion - kvs, tableSplits := sqlbase.MakeMetadataSchema(cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig).GetInitialValues(bootstrapVersion) + kvs, tableSplits := sqlbase.MakeMetadataSchema( + keys.SystemSQLCodec, cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig, + ).GetInitialValues(bootstrapVersion) splits = config.StaticSplits() splits = append(splits, tableSplits...) sort.Slice(splits, func(i, j int) bool { diff --git a/pkg/server/node.go b/pkg/server/node.go index 514be3a1e7b6..540a31f4a5e5 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -188,9 +188,11 @@ func allocateStoreIDs( // GetBootstrapSchema returns the schema which will be used to bootstrap a new // server. 
func GetBootstrapSchema( - defaultZoneConfig *zonepb.ZoneConfig, defaultSystemZoneConfig *zonepb.ZoneConfig, + codec keys.SQLCodec, + defaultZoneConfig *zonepb.ZoneConfig, + defaultSystemZoneConfig *zonepb.ZoneConfig, ) sqlbase.MetadataSchema { - return sqlbase.MakeMetadataSchema(defaultZoneConfig, defaultSystemZoneConfig) + return sqlbase.MakeMetadataSchema(codec, defaultZoneConfig, defaultSystemZoneConfig) } // bootstrapCluster initializes the passed-in engines for a new cluster. @@ -246,7 +248,7 @@ func bootstrapCluster( // not create the range, just its data. Only do this if this is the // first store. if i == 0 { - schema := GetBootstrapSchema(defaultZoneConfig, defaultSystemZoneConfig) + schema := GetBootstrapSchema(keys.SystemSQLCodec, defaultZoneConfig, defaultSystemZoneConfig) initialValues, tableSplits := schema.GetInitialValues(bootstrapVersion) splits := append(config.StaticSplits(), tableSplits...) sort.Slice(splits, func(i, j int) bool { diff --git a/pkg/server/node_test.go b/pkg/server/node_test.go index 8204d9524b63..f9e6d1b153d7 100644 --- a/pkg/server/node_test.go +++ b/pkg/server/node_test.go @@ -89,7 +89,9 @@ func TestBootstrapCluster(t *testing.T) { } // Add the initial keys for sql. - kvs, tableSplits := GetBootstrapSchema(zonepb.DefaultZoneConfigRef(), zonepb.DefaultSystemZoneConfigRef()).GetInitialValues(clusterversion.TestingClusterVersion) + kvs, tableSplits := GetBootstrapSchema( + keys.SystemSQLCodec, zonepb.DefaultZoneConfigRef(), zonepb.DefaultSystemZoneConfigRef(), + ).GetInitialValues(clusterversion.TestingClusterVersion) for _, kv := range kvs { expectedKeys = append(expectedKeys, kv.Key) } diff --git a/pkg/server/server_sql.go b/pkg/server/server_sql.go index f47c5c459a29..5aeb6c4f3f72 100644 --- a/pkg/server/server_sql.go +++ b/pkg/server/server_sql.go @@ -227,6 +227,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*sqlServer, error) { cfg.clock, cfg.circularInternalExecutor, cfg.Settings, + codec, lmKnobs, cfg.stopper, cfg.LeaseManagerConfig, @@ -540,6 +541,7 @@ func newSQLServer(ctx context.Context, cfg sqlServerArgs) (*sqlServer, error) { temporaryObjectCleaner := sql.NewTemporaryObjectCleaner( cfg.Settings, cfg.db, + codec, cfg.registry, distSQLServer.ServerConfig.SessionBoundInternalExecutorFactory, cfg.statusServer, @@ -606,6 +608,7 @@ func (s *sqlServer) start( migMgr := sqlmigrations.NewManager( stopper, s.execCfg.DB, + s.execCfg.Codec, &migrationsExecutor, s.execCfg.Clock, mmKnobs, diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index 9173cbc58a10..db7ed951f797 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -545,7 +545,7 @@ func TestSystemConfigGossip(t *testing.T) { ts := s.(*TestServer) ctx := context.TODO() - key := sqlbase.MakeDescMetadataKey(keys.MaxReservedDescID) + key := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, keys.MaxReservedDescID) valAt := func(i int) *sqlbase.DatabaseDescriptor { return &sqlbase.DatabaseDescriptor{Name: "foo", ID: sqlbase.ID(i)} } diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index e5490fe91160..023bdd0fba9a 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -586,7 +586,9 @@ func (ts *TestServer) ExpectedInitialRangeCount() (int, error) { func ExpectedInitialRangeCount( db *kv.DB, defaultZoneConfig *zonepb.ZoneConfig, defaultSystemZoneConfig *zonepb.ZoneConfig, ) (int, error) { - descriptorIDs, err := sqlmigrations.ExpectedDescriptorIDs(context.Background(), db, defaultZoneConfig, defaultSystemZoneConfig) + 
descriptorIDs, err := sqlmigrations.ExpectedDescriptorIDs( + context.Background(), db, keys.SystemSQLCodec, defaultZoneConfig, defaultSystemZoneConfig, + ) if err != nil { return 0, err } diff --git a/pkg/sql/alter_table.go b/pkg/sql/alter_table.go index 8a8a8ed54823..7b65ea235173 100644 --- a/pkg/sql/alter_table.go +++ b/pkg/sql/alter_table.go @@ -244,7 +244,7 @@ func (n *alterTableNode) startExec(params runParams) error { } case *tree.AlterTableAddConstraint: - info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil, params.ExecCfg().Codec) if err != nil { return err } @@ -579,12 +579,12 @@ func (n *alterTableNode) startExec(params runParams) error { return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "column %q in the middle of being added, try again later", t.Column) } - if err := n.tableDesc.Validate(params.ctx, params.p.txn); err != nil { + if err := n.tableDesc.Validate(params.ctx, params.p.txn, params.ExecCfg().Codec); err != nil { return err } case *tree.AlterTableDropConstraint: - info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil, params.ExecCfg().Codec) if err != nil { return err } @@ -606,12 +606,12 @@ func (n *alterTableNode) startExec(params runParams) error { return err } descriptorChanged = true - if err := n.tableDesc.Validate(params.ctx, params.p.txn); err != nil { + if err := n.tableDesc.Validate(params.ctx, params.p.txn, params.ExecCfg().Codec); err != nil { return err } case *tree.AlterTableValidateConstraint: - info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil, params.ExecCfg().Codec) if err != nil { return err } @@ -742,7 +742,7 @@ func (n *alterTableNode) startExec(params runParams) error { descriptorChanged = descChanged case *tree.AlterTableRenameConstraint: - info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := n.tableDesc.GetConstraintInfo(params.ctx, nil, params.ExecCfg().Codec) if err != nil { return err } @@ -1008,7 +1008,7 @@ func applyColumnMutation( } } - info, err := tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := tableDesc.GetConstraintInfo(params.ctx, nil, params.ExecCfg().Codec) if err != nil { return err } @@ -1043,7 +1043,7 @@ func applyColumnMutation( "constraint in the middle of being dropped") } } - info, err := tableDesc.GetConstraintInfo(params.ctx, nil) + info, err := tableDesc.GetConstraintInfo(params.ctx, nil, params.ExecCfg().Codec) if err != nil { return err } diff --git a/pkg/sql/authorization.go b/pkg/sql/authorization.go index 23b88ae6a16b..495b3b73d7e2 100644 --- a/pkg/sql/authorization.go +++ b/pkg/sql/authorization.go @@ -222,7 +222,8 @@ func (p *planner) MemberOfWithAdminOption( roleMembersCache := p.execCfg.RoleMemberCache // Lookup table version. 
- objDesc, err := p.PhysicalSchemaAccessor().GetObjectDesc(ctx, p.txn, p.ExecCfg().Settings, &roleMembersTableName, + objDesc, err := p.PhysicalSchemaAccessor().GetObjectDesc( + ctx, p.txn, p.ExecCfg().Settings, p.ExecCfg().Codec, &roleMembersTableName, p.ObjectLookupFlags(true /*required*/, false /*requireMutable*/)) if err != nil { return nil, err diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 54ad609439f5..44845c5aebfa 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -511,7 +511,7 @@ func (sc *SchemaChanger) validateConstraints( var tableDesc *sqlbase.TableDescriptor if err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *kv.Txn) error { - tableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) + tableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.execCfg.Codec, sc.tableID) return err }); err != nil { return err @@ -785,7 +785,7 @@ func (sc *SchemaChanger) distBackfill( if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error todoSpans, _, mutationIdx, err = rowexec.GetResumeSpans( - ctx, sc.jobRegistry, txn, sc.tableID, sc.mutationID, filter) + ctx, sc.jobRegistry, txn, sc.execCfg.Codec, sc.tableID, sc.mutationID, filter) return err }); err != nil { return err @@ -901,7 +901,7 @@ func (sc *SchemaChanger) distBackfill( if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error resumeSpans, _, _, err = rowexec.GetResumeSpans( - ctx, sc.jobRegistry, txn, sc.tableID, sc.mutationID, filter) + ctx, sc.jobRegistry, txn, sc.execCfg.Codec, sc.tableID, sc.mutationID, filter) return err }); err != nil { return err @@ -932,7 +932,7 @@ func (sc *SchemaChanger) updateJobRunningStatus( var tableDesc *sqlbase.TableDescriptor err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error - tableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) + tableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.execCfg.Codec, sc.tableID) if err != nil { return err } @@ -995,7 +995,7 @@ func (sc *SchemaChanger) validateIndexes(ctx context.Context) error { readAsOf := sc.clock.Now() var tableDesc *sqlbase.TableDescriptor if err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *kv.Txn) error { - tableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) + tableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.execCfg.Codec, sc.tableID) return err }); err != nil { return err @@ -1322,8 +1322,8 @@ func runSchemaChangesInTxn( // Reclaim all the old names. Leave the data and descriptor // cleanup for later. 
for _, drain := range tableDesc.DrainingNames { - err := sqlbase.RemoveObjectNamespaceEntry(ctx, planner.Txn(), drain.ParentID, - drain.ParentSchemaID, drain.Name, false /* KVTrace */) + err := sqlbase.RemoveObjectNamespaceEntry(ctx, planner.Txn(), planner.ExecCfg().Codec, + drain.ParentID, drain.ParentSchemaID, drain.Name, false /* KVTrace */) if err != nil { return err } @@ -1647,7 +1647,7 @@ func validateFkInTxn( return errors.AssertionFailedf("foreign key %s does not exist", fkName) } - return validateForeignKey(ctx, tableDesc.TableDesc(), fk, ie, txn) + return validateForeignKey(ctx, tableDesc.TableDesc(), fk, ie, txn, evalCtx.Codec) } // columnBackfillInTxn backfills columns for all mutation columns in diff --git a/pkg/sql/check.go b/pkg/sql/check.go index 955eb37aaa22..76e30b59a485 100644 --- a/pkg/sql/check.go +++ b/pkg/sql/check.go @@ -16,6 +16,7 @@ import ( "fmt" "strings" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -235,8 +236,9 @@ func validateForeignKey( fk *sqlbase.ForeignKeyConstraint, ie *InternalExecutor, txn *kv.Txn, + codec keys.SQLCodec, ) error { - targetTable, err := sqlbase.GetTableDescFromID(ctx, txn, fk.ReferencedTableID) + targetTable, err := sqlbase.GetTableDescFromID(ctx, txn, codec, fk.ReferencedTableID) if err != nil { return err } diff --git a/pkg/sql/colflow/colbatch_scan_test.go b/pkg/sql/colflow/colbatch_scan_test.go index 741a07241701..5da43964fb0b 100644 --- a/pkg/sql/colflow/colbatch_scan_test.go +++ b/pkg/sql/colflow/colbatch_scan_test.go @@ -51,7 +51,7 @@ func BenchmarkColBatchScan(b *testing.B) { numRows, sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(42)), ) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", tableName) b.Run(fmt.Sprintf("rows=%d", numRows), func(b *testing.B) { spec := execinfrapb.ProcessorSpec{ Core: execinfrapb.ProcessorCoreUnion{ diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index 7c2cac5214eb..48e1bcf3b327 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -280,7 +280,7 @@ func NewServer(cfg *ExecutorConfig, pool *mon.BytesMonitor) *Server { Metrics: makeMetrics(false /*internal*/), InternalMetrics: makeMetrics(true /*internal*/), // dbCache will be updated on Start(). 
- dbCache: newDatabaseCacheHolder(newDatabaseCache(systemCfg)), + dbCache: newDatabaseCacheHolder(newDatabaseCache(cfg.Codec, systemCfg)), pool: pool, sqlStats: sqlStats{st: cfg.Settings, apps: make(map[string]*appStats)}, reportedStats: sqlStats{st: cfg.Settings, apps: make(map[string]*appStats)}, @@ -825,6 +825,7 @@ func (ex *connExecutor) close(ctx context.Context, closeType closeType) { ctx, ex.server.cfg.Settings, ex.server.cfg.DB, + ex.server.cfg.Codec, &ie, ex.sessionID, ) diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index 92646063b18a..66ddda8b792c 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -2400,7 +2400,7 @@ CREATE TABLE crdb_internal.zones ( var table *TableDescriptor if zs.Database != "" { - database, err := sqlbase.GetDatabaseDescFromID(ctx, p.txn, sqlbase.ID(id)) + database, err := sqlbase.GetDatabaseDescFromID(ctx, p.txn, p.ExecCfg().Codec, sqlbase.ID(id)) if err != nil { return err } @@ -2408,7 +2408,7 @@ CREATE TABLE crdb_internal.zones ( continue } } else if zoneSpecifier.TableOrIndex.Table.ObjectName != "" { - table, err = sqlbase.GetTableDescFromID(ctx, p.txn, sqlbase.ID(id)) + table, err = sqlbase.GetTableDescFromID(ctx, p.txn, p.ExecCfg().Codec, sqlbase.ID(id)) if err != nil { return err } diff --git a/pkg/sql/crdb_internal_test.go b/pkg/sql/crdb_internal_test.go index b0821fb7772d..af30e983cc89 100644 --- a/pkg/sql/crdb_internal_test.go +++ b/pkg/sql/crdb_internal_test.go @@ -53,9 +53,9 @@ func TestGetAllNamesInternal(t *testing.T) { err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { batch := txn.NewBatch() - batch.Put(sqlbase.NewTableKey(999, 444, "bob").Key(), 9999) - batch.Put(sqlbase.NewDeprecatedTableKey(1000, "alice").Key(), 10000) - batch.Put(sqlbase.NewDeprecatedTableKey(999, "overwrite_me_old_value").Key(), 9999) + batch.Put(sqlbase.NewTableKey(999, 444, "bob").Key(keys.SystemSQLCodec), 9999) + batch.Put(sqlbase.NewDeprecatedTableKey(1000, "alice").Key(keys.SystemSQLCodec), 10000) + batch.Put(sqlbase.NewDeprecatedTableKey(999, "overwrite_me_old_value").Key(keys.SystemSQLCodec), 9999) return txn.CommitInBatch(ctx, batch) }) require.NoError(t, err) @@ -180,7 +180,7 @@ CREATE TABLE t.test (k INT); // We now want to create a pre-2.1 table descriptor with an // old-style bit column. We're going to edit the table descriptor // manually, without going through SQL. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") for i := range tableDesc.Columns { if tableDesc.Columns[i].Name == "k" { tableDesc.Columns[i].Type.InternalType.VisibleType = 4 // Pre-2.1 BIT. @@ -216,7 +216,7 @@ CREATE TABLE t.test (k INT); if err := txn.SetSystemConfigTrigger(); err != nil { return err } - return txn.Put(ctx, sqlbase.MakeDescMetadataKey(tableDesc.ID), sqlbase.WrapDescriptor(tableDesc)) + return txn.Put(ctx, sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), sqlbase.WrapDescriptor(tableDesc)) }); err != nil { t.Fatal(err) } @@ -273,7 +273,7 @@ SELECT column_name, character_maximum_length, numeric_precision, numeric_precisi } // And verify that this has re-set the fields. 
- tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") found := false for i := range tableDesc.Columns { col := &tableDesc.Columns[i] diff --git a/pkg/sql/create_index.go b/pkg/sql/create_index.go index 70cceaa1c742..926d74641f14 100644 --- a/pkg/sql/create_index.go +++ b/pkg/sql/create_index.go @@ -82,7 +82,7 @@ func (p *planner) setupFamilyAndConstraintForShard( if err != nil { return err } - info, err := tableDesc.GetConstraintInfo(ctx, nil) + info, err := tableDesc.GetConstraintInfo(ctx, nil, p.ExecCfg().Codec) if err != nil { return err } diff --git a/pkg/sql/create_sequence.go b/pkg/sql/create_sequence.go index 6924871b7cf7..ce0e9bfc1de5 100644 --- a/pkg/sql/create_sequence.go +++ b/pkg/sql/create_sequence.go @@ -118,7 +118,7 @@ func doCreateSequence( dbDesc.ID, schemaID, name.Table(), - ).Key() + ).Key(params.ExecCfg().Codec) if err = params.p.createDescriptorWithID( params.ctx, key, id, &desc, params.EvalContext().Settings, jobDesc, ); err != nil { @@ -133,7 +133,7 @@ func doCreateSequence( return err } - if err := desc.Validate(params.ctx, params.p.txn); err != nil { + if err := desc.Validate(params.ctx, params.p.txn, params.ExecCfg().Codec); err != nil { return err } diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index bf3ddd1c6e67..74f46fd6aa77 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -180,7 +180,7 @@ func getTableCreateParams( tempSchemaName := params.p.TemporarySchemaName() sKey := sqlbase.NewSchemaKey(dbID, tempSchemaName) var err error - schemaID, err = getDescriptorID(params.ctx, params.p.txn, sKey) + schemaID, err = getDescriptorID(params.ctx, params.p.txn, params.ExecCfg().Codec, sKey) if err != nil { return nil, 0, err } else if schemaID == sqlbase.InvalidID { @@ -193,7 +193,7 @@ func getTableCreateParams( tKey = sqlbase.NewTableKey(dbID, schemaID, tableName) } - exists, _, err := sqlbase.LookupObjectID(params.ctx, params.p.txn, dbID, schemaID, tableName) + exists, _, err := sqlbase.LookupObjectID(params.ctx, params.p.txn, params.ExecCfg().Codec, dbID, schemaID, tableName) if err == nil && exists { // Still return data in this case. return tKey, schemaID, sqlbase.NewRelationAlreadyExistsError(tableName) @@ -321,7 +321,7 @@ func (n *createTableNode) startExec(params runParams) error { // Descriptor written to store here. if err := params.p.createDescriptorWithID( - params.ctx, tKey.Key(), id, &desc, params.EvalContext().Settings, + params.ctx, tKey.Key(params.ExecCfg().Codec), id, &desc, params.EvalContext().Settings, tree.AsStringWithFQNames(n.n, params.Ann()), ); err != nil { return err @@ -344,7 +344,7 @@ func (n *createTableNode) startExec(params runParams) error { } } - if err := desc.Validate(params.ctx, params.p.txn); err != nil { + if err := desc.Validate(params.ctx, params.p.txn, params.ExecCfg().Codec); err != nil { return err } @@ -502,18 +502,18 @@ func (p *planner) resolveFK( } func qualifyFKColErrorWithDB( - ctx context.Context, txn *kv.Txn, tbl *sqlbase.TableDescriptor, col string, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, tbl *sqlbase.TableDescriptor, col string, ) string { if txn == nil { return tree.ErrString(tree.NewUnresolvedName(tbl.Name, col)) } // TODO(solon): this ought to use a database cache. 
- db, err := sqlbase.GetDatabaseDescFromID(ctx, txn, tbl.ParentID) + db, err := sqlbase.GetDatabaseDescFromID(ctx, txn, codec, tbl.ParentID) if err != nil { return tree.ErrString(tree.NewUnresolvedName(tbl.Name, col)) } - schema, err := schema.ResolveNameByID(ctx, txn, db.ID, tbl.GetParentSchemaID()) + schema, err := schema.ResolveNameByID(ctx, txn, codec, db.ID, tbl.GetParentSchemaID()) if err != nil { return tree.ErrString(tree.NewUnresolvedName(tbl.Name, col)) } @@ -545,7 +545,7 @@ func (p *planner) MaybeUpgradeDependentOldForeignKeyVersionTables( maybeUpgradeFKRepresentation := func(id sqlbase.ID) error { // Read the referenced table and see if the foreign key representation has changed. If it has, write // the upgraded descriptor back to disk. - tbl, didUpgrade, err := sqlbase.GetTableDescFromIDWithFKsChanged(ctx, p.txn, id) + tbl, didUpgrade, err := sqlbase.GetTableDescFromIDWithFKsChanged(ctx, p.txn, p.ExecCfg().Codec, id) if err != nil { return err } @@ -693,7 +693,7 @@ func ResolveFK( // or else we can hit other checks that break things with // undesired error codes, e.g. #42858. // It may be removable after #37255 is complete. - constraintInfo, err := tbl.GetConstraintInfo(ctx, nil) + constraintInfo, err := tbl.GetConstraintInfo(ctx, nil, evalCtx.Codec) if err != nil { return err } @@ -722,7 +722,7 @@ func ResolveFK( if d.Actions.Delete == tree.SetNull || d.Actions.Update == tree.SetNull { for _, originColumn := range originCols { if !originColumn.Nullable { - col := qualifyFKColErrorWithDB(ctx, txn, tbl.TableDesc(), originColumn.Name) + col := qualifyFKColErrorWithDB(ctx, txn, evalCtx.Codec, tbl.TableDesc(), originColumn.Name) return pgerror.Newf(pgcode.InvalidForeignKey, "cannot add a SET NULL cascading action on column %q which has a NOT NULL constraint", col, ) @@ -737,7 +737,7 @@ func ResolveFK( // Having a default expression of NULL, and a constraint of NOT NULL is a // contradiction and should never be allowed. if originColumn.DefaultExpr == nil && !originColumn.Nullable { - col := qualifyFKColErrorWithDB(ctx, txn, tbl.TableDesc(), originColumn.Name) + col := qualifyFKColErrorWithDB(ctx, txn, evalCtx.Codec, tbl.TableDesc(), originColumn.Name) return pgerror.Newf(pgcode.InvalidForeignKey, "cannot add a SET DEFAULT cascading action on column %q which has a "+ "NOT NULL constraint and a NULL default expression", col, diff --git a/pkg/sql/create_test.go b/pkg/sql/create_test.go index 4ca0102bb7cc..0189f7b41335 100644 --- a/pkg/sql/create_test.go +++ b/pkg/sql/create_test.go @@ -50,7 +50,7 @@ func TestDatabaseDescriptor(t *testing.T) { } // Database name. - nameKey := sqlbase.NewDatabaseKey("test").Key() + nameKey := sqlbase.NewDatabaseKey("test").Key(keys.SystemSQLCodec) if gr, err := kvDB.Get(ctx, nameKey); err != nil { t.Fatal(err) } else if gr.Exists() { @@ -58,7 +58,7 @@ func TestDatabaseDescriptor(t *testing.T) { } // Write a descriptor key that will interfere with database creation. 
- dbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(expectedCounter)) + dbDescKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(expectedCounter)) dbDesc := &sqlbase.Descriptor{ Union: &sqlbase.Descriptor_Database{ Database: &sqlbase.DatabaseDescriptor{ @@ -91,7 +91,9 @@ func TestDatabaseDescriptor(t *testing.T) { if kvs, err := kvDB.Scan(ctx, start, start.PrefixEnd(), 0 /* maxRows */); err != nil { t.Fatal(err) } else { - descriptorIDs, err := sqlmigrations.ExpectedDescriptorIDs(ctx, kvDB, &s.(*server.TestServer).Cfg.DefaultZoneConfig, &s.(*server.TestServer).Cfg.DefaultSystemZoneConfig) + descriptorIDs, err := sqlmigrations.ExpectedDescriptorIDs( + ctx, kvDB, keys.SystemSQLCodec, &s.(*server.TestServer).Cfg.DefaultZoneConfig, &s.(*server.TestServer).Cfg.DefaultSystemZoneConfig, + ) if err != nil { t.Fatal(err) } @@ -111,7 +113,7 @@ func TestDatabaseDescriptor(t *testing.T) { t.Fatal(err) } - dbDescKey = sqlbase.MakeDescMetadataKey(sqlbase.ID(expectedCounter)) + dbDescKey = sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(expectedCounter)) if _, err := sqlDB.Exec(`CREATE DATABASE test`); err != nil { t.Fatal(err) } @@ -228,7 +230,7 @@ func verifyTables( count++ tableName := fmt.Sprintf("table_%d", id) kvDB := tc.Servers[count%tc.NumServers()].DB() - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", tableName) if tableDesc.ID < descIDStart { t.Fatalf( "table %s's ID %d is too small. Expected >= %d", @@ -260,7 +262,7 @@ func verifyTables( if _, ok := tableIDs[id]; ok { continue } - descKey := sqlbase.MakeDescMetadataKey(id) + descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, id) desc := &sqlbase.Descriptor{} if err := kvDB.GetProto(context.TODO(), descKey, desc); err != nil { t.Fatal(err) diff --git a/pkg/sql/create_view.go b/pkg/sql/create_view.go index 6b792cfef1f2..87898d223ef1 100644 --- a/pkg/sql/create_view.go +++ b/pkg/sql/create_view.go @@ -87,7 +87,7 @@ func (n *createViewNode) startExec(params runParams) error { case n.replace: // If we are replacing an existing view see if what we are // replacing is actually a view. - id, err := getDescriptorID(params.ctx, params.p.txn, tKey) + id, err := getDescriptorID(params.ctx, params.p.txn, params.ExecCfg().Codec, tKey) if err != nil { return err } @@ -157,7 +157,7 @@ func (n *createViewNode) startExec(params runParams) error { // TODO (lucy): I think this needs a NodeFormatter implementation. For now, // do some basic string formatting (not accurate in the general case). 
if err = params.p.createDescriptorWithID( - params.ctx, tKey.Key(), id, &desc, params.EvalContext().Settings, + params.ctx, tKey.Key(params.ExecCfg().Codec), id, &desc, params.EvalContext().Settings, fmt.Sprintf("CREATE VIEW %q AS %q", n.viewName, n.viewQuery), ); err != nil { return err @@ -195,7 +195,7 @@ func (n *createViewNode) startExec(params runParams) error { } } - if err := newDesc.Validate(params.ctx, params.p.txn); err != nil { + if err := newDesc.Validate(params.ctx, params.p.txn, params.ExecCfg().Codec); err != nil { return err } diff --git a/pkg/sql/database.go b/pkg/sql/database.go index 2298acd3f69f..0d87679275b2 100644 --- a/pkg/sql/database.go +++ b/pkg/sql/database.go @@ -16,6 +16,7 @@ import ( "sync" "github.com/cockroachdb/cockroach/pkg/config" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -42,13 +43,17 @@ type databaseCache struct { // databases is really a map of string -> sqlbase.ID databases sync.Map + // codec is used to encode and decode sql keys. + codec keys.SQLCodec + // systemConfig holds a copy of the latest system config since the last // call to resetForBatch. systemConfig *config.SystemConfig } -func newDatabaseCache(cfg *config.SystemConfig) *databaseCache { +func newDatabaseCache(codec keys.SQLCodec, cfg *config.SystemConfig) *databaseCache { return &databaseCache{ + codec: codec, systemConfig: cfg, } } @@ -75,12 +80,12 @@ func makeDatabaseDesc(p *tree.CreateDatabase) sqlbase.DatabaseDescriptor { // getDatabaseID resolves a database name into a database ID. // Returns InvalidID on failure. func getDatabaseID( - ctx context.Context, txn *kv.Txn, name string, required bool, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, name string, required bool, ) (sqlbase.ID, error) { if name == sqlbase.SystemDB.Name { return sqlbase.SystemDB.ID, nil } - found, dbID, err := sqlbase.LookupDatabaseID(ctx, txn, name) + found, dbID, err := sqlbase.LookupDatabaseID(ctx, txn, codec, name) if err != nil { return sqlbase.InvalidID, err } @@ -94,10 +99,10 @@ func getDatabaseID( // returning nil if the descriptor is not found. If you want the "not // found" condition to return an error, use mustGetDatabaseDescByID() instead. func getDatabaseDescByID( - ctx context.Context, txn *kv.Txn, id sqlbase.ID, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id sqlbase.ID, ) (*sqlbase.DatabaseDescriptor, error) { desc := &sqlbase.DatabaseDescriptor{} - if err := getDescriptorByID(ctx, txn, id, desc); err != nil { + if err := getDescriptorByID(ctx, txn, codec, id, desc); err != nil { return nil, err } return desc, nil @@ -106,9 +111,9 @@ func getDatabaseDescByID( // MustGetDatabaseDescByID looks up the database descriptor given its ID, // returning an error if the descriptor is not found. 
func MustGetDatabaseDescByID( - ctx context.Context, txn *kv.Txn, id sqlbase.ID, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id sqlbase.ID, ) (*sqlbase.DatabaseDescriptor, error) { - desc, err := getDatabaseDescByID(ctx, txn, id) + desc, err := getDatabaseDescByID(ctx, txn, codec, id) if err != nil { return nil, err } @@ -142,7 +147,7 @@ func (dc *databaseCache) getCachedDatabaseDescByID( return &sysDB, nil } - descKey := sqlbase.MakeDescMetadataKey(id) + descKey := sqlbase.MakeDescMetadataKey(dc.codec, id) descVal := dc.systemConfig.GetValue(descKey) if descVal == nil { return nil, nil @@ -188,7 +193,7 @@ func (dc *databaseCache) getDatabaseDesc( return err } a := UncachedPhysicalAccessor{} - desc, err = a.GetDatabaseDesc(ctx, txn, name, + desc, err = a.GetDatabaseDesc(ctx, txn, dc.codec, name, tree.DatabaseLookupFlags{Required: required}) return err }); err != nil { @@ -211,7 +216,7 @@ func (dc *databaseCache) getDatabaseDescByID( if err != nil { log.VEventf(ctx, 3, "error getting database descriptor from cache: %s", err) } - desc, err = MustGetDatabaseDescByID(ctx, txn, id) + desc, err = MustGetDatabaseDescByID(ctx, txn, dc.codec, id) } return desc, err } @@ -240,7 +245,7 @@ func (dc *databaseCache) getDatabaseID( return err } var err error - dbID, err = getDatabaseID(ctx, txn, name, required) + dbID, err = getDatabaseID(ctx, txn, dc.codec, name, required) return err }); err != nil { return sqlbase.InvalidID, err @@ -264,12 +269,12 @@ func (dc *databaseCache) getCachedDatabaseID(name string) (sqlbase.ID, error) { } var nameKey sqlbase.DescriptorKey = sqlbase.NewDatabaseKey(name) - nameVal := dc.systemConfig.GetValue(nameKey.Key()) + nameVal := dc.systemConfig.GetValue(nameKey.Key(dc.codec)) if nameVal == nil { // Try the deprecated system.namespace before returning InvalidID. // TODO(solon): This can be removed in 20.2. 
nameKey = sqlbase.NewDeprecatedDatabaseKey(name) - nameVal = dc.systemConfig.GetValue(nameKey.Key()) + nameVal = dc.systemConfig.GetValue(nameKey.Key(dc.codec)) if nameVal == nil { return sqlbase.InvalidID, nil } @@ -289,17 +294,17 @@ func (p *planner) renameDatabase( return err } - if exists, _, err := sqlbase.LookupDatabaseID(ctx, p.txn, newName); err == nil && exists { + if exists, _, err := sqlbase.LookupDatabaseID(ctx, p.txn, p.ExecCfg().Codec, newName); err == nil && exists { return pgerror.Newf(pgcode.DuplicateDatabase, "the new database name %q already exists", newName) } else if err != nil { return err } - newKey := sqlbase.MakeDatabaseNameKey(ctx, p.ExecCfg().Settings, newName).Key() + newKey := sqlbase.MakeDatabaseNameKey(ctx, p.ExecCfg().Settings, newName).Key(p.ExecCfg().Codec) descID := oldDesc.GetID() - descKey := sqlbase.MakeDescMetadataKey(descID) + descKey := sqlbase.MakeDescMetadataKey(p.ExecCfg().Codec, descID) descDesc := sqlbase.WrapDescriptor(oldDesc) b := &kv.Batch{} @@ -310,7 +315,7 @@ func (p *planner) renameDatabase( b.CPut(newKey, descID, nil) b.Put(descKey, descDesc) err := sqlbase.RemoveDatabaseNamespaceEntry( - ctx, p.txn, oldName, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), + ctx, p.txn, p.ExecCfg().Codec, oldName, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), ) if err != nil { return err diff --git a/pkg/sql/database_test.go b/pkg/sql/database_test.go index ed5ad7bb17ec..7c7b1ec59cd4 100644 --- a/pkg/sql/database_test.go +++ b/pkg/sql/database_test.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -52,14 +53,14 @@ func TestDatabaseAccessors(t *testing.T) { defer s.Stopper().Stop(context.TODO()) if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { - if _, err := getDatabaseDescByID(ctx, txn, sqlbase.SystemDB.ID); err != nil { + if _, err := getDatabaseDescByID(ctx, txn, keys.SystemSQLCodec, sqlbase.SystemDB.ID); err != nil { return err } - if _, err := MustGetDatabaseDescByID(ctx, txn, sqlbase.SystemDB.ID); err != nil { + if _, err := MustGetDatabaseDescByID(ctx, txn, keys.SystemSQLCodec, sqlbase.SystemDB.ID); err != nil { return err } - databaseCache := newDatabaseCache(config.NewSystemConfig(zonepb.DefaultZoneConfigRef())) + databaseCache := newDatabaseCache(keys.SystemSQLCodec, config.NewSystemConfig(zonepb.DefaultZoneConfigRef())) _, err := databaseCache.getDatabaseDescByID(ctx, txn, sqlbase.SystemDB.ID) return err }); err != nil { diff --git a/pkg/sql/delete_test.go b/pkg/sql/delete_test.go index 6ae65e1d6b0b..0a089b7ad267 100644 --- a/pkg/sql/delete_test.go +++ b/pkg/sql/delete_test.go @@ -16,6 +16,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -105,12 +106,12 @@ CREATE TABLE IF NOT EXISTS child_with_index( defer drop(tableNames...) 
tablesByID := make(map[sqlbase.ID]*ImmutableTableDescriptor) - pd := sqlbase.GetImmutableTableDescriptor(kvDB, "defaultdb", "parent") + pd := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, "defaultdb", "parent") tablesByID[pd.ID] = pd for _, tableName := range tableNames { if tableName != "parent" { - cd := sqlbase.GetImmutableTableDescriptor(kvDB, "defaultdb", tableName) + cd := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, "defaultdb", tableName) tablesByID[cd.ID] = cd } } diff --git a/pkg/sql/descriptor.go b/pkg/sql/descriptor.go index a08a0c74f281..2b01fc514b08 100644 --- a/pkg/sql/descriptor.go +++ b/pkg/sql/descriptor.go @@ -82,7 +82,7 @@ func (p *planner) createDatabase( shouldCreatePublicSchema = false } - if exists, _, err := sqlbase.LookupDatabaseID(ctx, p.txn, desc.Name); err == nil && exists { + if exists, _, err := sqlbase.LookupDatabaseID(ctx, p.txn, p.ExecCfg().Codec, desc.Name); err == nil && exists { if ifNotExists { // Noop. return false, nil @@ -97,7 +97,7 @@ func (p *planner) createDatabase( return false, err } - if err := p.createDescriptorWithID(ctx, dKey.Key(), id, desc, nil, jobDesc); err != nil { + if err := p.createDescriptorWithID(ctx, dKey.Key(p.ExecCfg().Codec), id, desc, nil, jobDesc); err != nil { return true, err } @@ -105,7 +105,7 @@ func (p *planner) createDatabase( // be created in every database in >= 20.2. if shouldCreatePublicSchema { // Every database must be initialized with the public schema. - if err := p.createSchemaWithID(ctx, sqlbase.NewPublicSchemaKey(id).Key(), keys.PublicSchemaID); err != nil { + if err := p.createSchemaWithID(ctx, sqlbase.NewPublicSchemaKey(id).Key(p.ExecCfg().Codec), keys.PublicSchemaID); err != nil { return true, err } } @@ -138,7 +138,15 @@ func (p *planner) createDescriptorWithID( log.VEventf(ctx, 2, "CPut %s -> %d", idKey, descID) } b.CPut(idKey, descID, nil) - if err := WriteNewDescToBatch(ctx, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), st, b, descID, descriptor); err != nil { + if err := WriteNewDescToBatch( + ctx, + p.ExtendedEvalContext().Tracing.KVTracingEnabled(), + st, + b, + p.ExecCfg().Codec, + descID, + descriptor, + ); err != nil { return err } @@ -171,9 +179,9 @@ func (p *planner) createDescriptorWithID( // getDescriptorID looks up the ID for plainKey. // InvalidID is returned if the name cannot be resolved. func getDescriptorID( - ctx context.Context, txn *kv.Txn, plainKey sqlbase.DescriptorKey, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, plainKey sqlbase.DescriptorKey, ) (sqlbase.ID, error) { - key := plainKey.Key() + key := plainKey.Key(codec) log.Eventf(ctx, "looking up descriptor ID for name key %q", key) gr, err := txn.Get(ctx, key) if err != nil { @@ -187,7 +195,7 @@ func getDescriptorID( // resolveSchemaID resolves a schema's ID based on db and name. func resolveSchemaID( - ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, scName string, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbID sqlbase.ID, scName string, ) (bool, sqlbase.ID, error) { // Try to use the system name resolution bypass. Avoids a hotspot by explicitly // checking for public schema. 
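As an illustrative aside rather than part of the applied diff: the descriptor.go hunks above and below thread a keys.SQLCodec through the name- and ID-lookup helpers. A minimal sketch of how a caller uses the codec-aware sqlbase helpers follows; the function name lookupTableDesc and its placement are hypothetical, and it assumes the caller already has a codec in hand (for example keys.SystemSQLCodec or an ExecutorConfig's Codec):

    import (
        "context"

        "github.com/cockroachdb/cockroach/pkg/keys"
        "github.com/cockroachdb/cockroach/pkg/kv"
        "github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
    )

    // lookupTableDesc resolves a table name to an ID and then loads its
    // descriptor, passing the codec explicitly so that both the namespace key
    // and the descriptor key are encoded with the correct tenant prefix.
    func lookupTableDesc(
        ctx context.Context,
        txn *kv.Txn,
        codec keys.SQLCodec,
        dbID, schemaID sqlbase.ID,
        tableName string,
    ) (*sqlbase.TableDescriptor, error) {
        found, id, err := sqlbase.LookupObjectID(ctx, txn, codec, dbID, schemaID, tableName)
        if err != nil || !found {
            return nil, err
        }
        return sqlbase.GetTableDescFromID(ctx, txn, codec, id)
    }
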
@@ -196,7 +204,7 @@ func resolveSchemaID( } sKey := sqlbase.NewSchemaKey(dbID, scName) - schemaID, err := getDescriptorID(ctx, txn, sKey) + schemaID, err := getDescriptorID(ctx, txn, codec, sKey) if err != nil || schemaID == sqlbase.InvalidID { return false, sqlbase.InvalidID, err } @@ -209,15 +217,15 @@ func resolveSchemaID( // Returns the descriptor (if found), a bool representing whether the // descriptor was found and an error if any. func lookupDescriptorByID( - ctx context.Context, txn *kv.Txn, id sqlbase.ID, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, id sqlbase.ID, ) (sqlbase.DescriptorProto, bool, error) { var desc sqlbase.DescriptorProto for _, lookupFn := range []func() (sqlbase.DescriptorProto, error){ func() (sqlbase.DescriptorProto, error) { - return sqlbase.GetTableDescFromID(ctx, txn, id) + return sqlbase.GetTableDescFromID(ctx, txn, codec, id) }, func() (sqlbase.DescriptorProto, error) { - return sqlbase.GetDatabaseDescFromID(ctx, txn, id) + return sqlbase.GetDatabaseDescFromID(ctx, txn, codec, id) }, } { var err error @@ -239,10 +247,14 @@ func lookupDescriptorByID( // In most cases you'll want to use wrappers: `getDatabaseDescByID` or // `getTableDescByID`. func getDescriptorByID( - ctx context.Context, txn *kv.Txn, id sqlbase.ID, descriptor sqlbase.DescriptorProto, + ctx context.Context, + txn *kv.Txn, + codec keys.SQLCodec, + id sqlbase.ID, + descriptor sqlbase.DescriptorProto, ) error { log.Eventf(ctx, "fetching descriptor with ID %d", id) - descKey := sqlbase.MakeDescMetadataKey(id) + descKey := sqlbase.MakeDescMetadataKey(codec, id) desc := &sqlbase.Descriptor{} ts, err := txn.GetProtoTs(ctx, descKey, desc) if err != nil { @@ -255,11 +267,11 @@ func getDescriptorByID( return pgerror.Newf(pgcode.WrongObjectType, "%q is not a table", desc.String()) } - if err := table.MaybeFillInDescriptor(ctx, txn); err != nil { + if err := table.MaybeFillInDescriptor(ctx, txn, codec); err != nil { return err } - if err := table.Validate(ctx, txn); err != nil { + if err := table.Validate(ctx, txn, codec); err != nil { return err } *t = *table @@ -286,8 +298,8 @@ func IsDefaultCreatedDescriptor(descID sqlbase.ID) bool { // CountUserDescriptors returns the number of descriptors present that were // created by the user (i.e. not present when the cluster started). -func CountUserDescriptors(ctx context.Context, txn *kv.Txn) (int, error) { - allDescs, err := GetAllDescriptors(ctx, txn) +func CountUserDescriptors(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec) (int, error) { + allDescs, err := GetAllDescriptors(ctx, txn, codec) if err != nil { return 0, err } @@ -303,9 +315,11 @@ func CountUserDescriptors(ctx context.Context, txn *kv.Txn) (int, error) { } // GetAllDescriptors looks up and returns all available descriptors. 
-func GetAllDescriptors(ctx context.Context, txn *kv.Txn) ([]sqlbase.DescriptorProto, error) { +func GetAllDescriptors( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, +) ([]sqlbase.DescriptorProto, error) { log.Eventf(ctx, "fetching all descriptors") - descsKey := sqlbase.MakeAllDescsMetadataKey() + descsKey := sqlbase.MakeAllDescsMetadataKey(codec) kvs, err := txn.Scan(ctx, descsKey, descsKey.PrefixEnd(), 0) if err != nil { return nil, err @@ -320,7 +334,7 @@ func GetAllDescriptors(ctx context.Context, txn *kv.Txn) ([]sqlbase.DescriptorPr switch t := desc.Union.(type) { case *sqlbase.Descriptor_Table: table := desc.Table(kv.Value.Timestamp) - if err := table.MaybeFillInDescriptor(ctx, txn); err != nil { + if err := table.MaybeFillInDescriptor(ctx, txn, codec); err != nil { return nil, err } descs = append(descs, table) @@ -335,9 +349,11 @@ func GetAllDescriptors(ctx context.Context, txn *kv.Txn) ([]sqlbase.DescriptorPr // GetAllDatabaseDescriptorIDs looks up and returns all available database // descriptor IDs. -func GetAllDatabaseDescriptorIDs(ctx context.Context, txn *kv.Txn) ([]sqlbase.ID, error) { +func GetAllDatabaseDescriptorIDs( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, +) ([]sqlbase.ID, error) { log.Eventf(ctx, "fetching all database descriptor IDs") - nameKey := sqlbase.NewDatabaseKey("" /* name */).Key() + nameKey := sqlbase.NewDatabaseKey("" /* name */).Key(codec) kvs, err := txn.Scan(ctx, nameKey, nameKey.PrefixEnd(), 0 /*maxRows */) if err != nil { return nil, err @@ -346,7 +362,7 @@ func GetAllDatabaseDescriptorIDs(ctx context.Context, txn *kv.Txn) ([]sqlbase.ID // func (a UncachedPhysicalAccessor) GetObjectNames. Same concept // applies here. // TODO(solon): This complexity can be removed in 20.2. - nameKey = sqlbase.NewDeprecatedDatabaseKey("" /* name */).Key() + nameKey = sqlbase.NewDeprecatedDatabaseKey("" /* name */).Key(codec) dkvs, err := txn.Scan(ctx, nameKey, nameKey.PrefixEnd(), 0 /* maxRows */) if err != nil { return nil, err @@ -374,10 +390,11 @@ func writeDescToBatch( kvTrace bool, s *cluster.Settings, b *kv.Batch, + codec keys.SQLCodec, descID sqlbase.ID, desc sqlbase.DescriptorProto, ) (err error) { - descKey := sqlbase.MakeDescMetadataKey(descID) + descKey := sqlbase.MakeDescMetadataKey(codec, descID) descDesc := sqlbase.WrapDescriptor(desc) if kvTrace { log.VEventf(ctx, 2, "Put %s -> %s", descKey, descDesc) @@ -395,10 +412,11 @@ func WriteNewDescToBatch( kvTrace bool, s *cluster.Settings, b *kv.Batch, + codec keys.SQLCodec, tableID sqlbase.ID, desc sqlbase.DescriptorProto, ) (err error) { - descKey := sqlbase.MakeDescMetadataKey(tableID) + descKey := sqlbase.MakeDescMetadataKey(codec, tableID) descDesc := sqlbase.WrapDescriptor(desc) if kvTrace { log.VEventf(ctx, 2, "CPut %s -> %s", descKey, descDesc) diff --git a/pkg/sql/descriptor_mutation_test.go b/pkg/sql/descriptor_mutation_test.go index e41756333051..a0496fba61d4 100644 --- a/pkg/sql/descriptor_mutation_test.go +++ b/pkg/sql/descriptor_mutation_test.go @@ -85,7 +85,7 @@ func (mt mutationTest) makeMutationsActive() { } if err := mt.kvDB.Put( context.TODO(), - sqlbase.MakeDescMetadataKey(mt.tableDesc.ID), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, mt.tableDesc.ID), sqlbase.WrapDescriptor(mt.tableDesc), ); err != nil { mt.Fatal(err) @@ -143,7 +143,7 @@ func (mt mutationTest) writeMutation(m sqlbase.DescriptorMutation) { } if err := mt.kvDB.Put( context.TODO(), - sqlbase.MakeDescMetadataKey(mt.tableDesc.ID), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, 
mt.tableDesc.ID), sqlbase.WrapDescriptor(mt.tableDesc), ); err != nil { mt.Fatal(err) @@ -179,7 +179,7 @@ ALTER TABLE t.test ADD COLUMN i VARCHAR NOT NULL DEFAULT 'i'; } // read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") mTest := makeMutationTest(t, kvDB, sqlDB, tableDesc) // Add column "i" as a mutation in delete/write. @@ -238,7 +238,7 @@ CREATE INDEX allidx ON t.test (k, v); } // read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") mTest := makeMutationTest(t, kvDB, sqlDB, tableDesc) @@ -253,7 +253,7 @@ CREATE INDEX allidx ON t.test (k, v); // Init table to start state. mTest.Exec(t, `TRUNCATE TABLE t.test`) // read table descriptor - mTest.tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + mTest.tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") initRows := [][]string{{"a", "z", "q"}} for _, row := range initRows { @@ -499,7 +499,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v)); } // read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") mTest := makeMutationTest(t, kvDB, sqlDB, tableDesc) @@ -514,7 +514,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR, INDEX foo (v)); t.Fatal(err) } // read table descriptor - mTest.tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + mTest.tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") initRows := [][]string{{"a", "z"}, {"b", "y"}} for _, row := range initRows { @@ -650,7 +650,7 @@ CREATE INDEX allidx ON t.test (k, v); } // read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") mTest := makeMutationTest(t, kvDB, sqlDB, tableDesc) @@ -678,7 +678,7 @@ CREATE INDEX allidx ON t.test (k, v); } // read table descriptor - mTest.tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + mTest.tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") initRows := [][]string{{"a", "z", "q"}, {"b", "y", "r"}} for _, row := range initRows { @@ -851,7 +851,7 @@ CREATE TABLE t.test (a STRING PRIMARY KEY, b STRING, c STRING, INDEX foo (c)); } // Read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") mt := makeMutationTest(t, kvDB, sqlDB, tableDesc) @@ -981,7 +981,7 @@ CREATE TABLE t.test (a STRING PRIMARY KEY, b STRING, c STRING, INDEX foo (c)); mt.Exec(t, `ALTER TABLE t.test RENAME COLUMN c TO d`) // The mutation in the table descriptor has changed and we would like // to update our copy to make it live. - mt.tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + mt.tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Make "ufo" live. mt.makeMutationsActive() @@ -1005,7 +1005,7 @@ CREATE TABLE t.test (a STRING PRIMARY KEY, b STRING, c STRING, INDEX foo (c)); // The mutation in the table descriptor has changed and we would like // to update our copy to make it live. - mt.tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + mt.tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Make column "e" live. 
mt.makeMutationsActive() @@ -1086,7 +1086,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR UNIQUE); } // read table descriptor - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") expected := []struct { name string @@ -1155,12 +1155,12 @@ func TestAddingFKs(t *testing.T) { } // Step the referencing table back to the ADD state. - ordersDesc := sqlbase.GetTableDescriptor(kvDB, "t", "orders") + ordersDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "orders") ordersDesc.State = sqlbase.TableDescriptor_ADD ordersDesc.Version++ if err := kvDB.Put( context.TODO(), - sqlbase.MakeDescMetadataKey(ordersDesc.ID), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, ordersDesc.ID), sqlbase.WrapDescriptor(ordersDesc), ); err != nil { t.Fatal(err) diff --git a/pkg/sql/distsql_physical_planner_test.go b/pkg/sql/distsql_physical_planner_test.go index 35b49d94b252..1701d0150f0d 100644 --- a/pkg/sql/distsql_physical_planner_test.go +++ b/pkg/sql/distsql_physical_planner_test.go @@ -24,6 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -156,7 +157,7 @@ func TestPlanningDuringSplitsAndMerges(t *testing.T) { return default: // Split the table at a random row. - desc := sqlbase.GetTableDescriptor(cdb, "test", "t") + desc := sqlbase.GetTableDescriptor(cdb, keys.SystemSQLCodec, "test", "t") val := rng.Intn(n) t.Logf("splitting at %d", val) diff --git a/pkg/sql/distsql_plan_backfill_test.go b/pkg/sql/distsql_plan_backfill_test.go index 38c06635d60f..11078c72aa07 100644 --- a/pkg/sql/distsql_plan_backfill_test.go +++ b/pkg/sql/distsql_plan_backfill_test.go @@ -16,6 +16,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -74,7 +75,7 @@ func TestDistBackfill(t *testing.T) { sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowEnglishFn), ) // Split the table into multiple ranges. - descNumToStr := sqlbase.GetTableDescriptor(cdb, "test", "numtostr") + descNumToStr := sqlbase.GetTableDescriptor(cdb, keys.SystemSQLCodec, "test", "numtostr") var sps []SplitPoint //for i := 1; i <= numNodes-1; i++ { for i := numNodes - 1; i > 0; i-- { diff --git a/pkg/sql/distsql_plan_join_test.go b/pkg/sql/distsql_plan_join_test.go index bc75d2d518f3..346bf2e563b4 100644 --- a/pkg/sql/distsql_plan_join_test.go +++ b/pkg/sql/distsql_plan_join_test.go @@ -110,7 +110,7 @@ func encodeTestKey(kvDB *kv.DB, keyStr string) (roachpb.Key, error) { for _, tok := range tokens { // Encode the table ID if the token is a table name. 
if tableNames[tok] { - desc := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, tok) + desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, tok) key = encoding.EncodeUvarintAscending(key, uint64(desc.ID)) continue } @@ -153,7 +153,7 @@ func decodeTestKey(kvDB *kv.DB, key roachpb.Key) (string, error) { } if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { - desc, err := sqlbase.GetTableDescFromID(context.TODO(), txn, sqlbase.ID(descID)) + desc, err := sqlbase.GetTableDescFromID(context.TODO(), txn, keys.SystemSQLCodec, sqlbase.ID(descID)) if err != nil { return err } diff --git a/pkg/sql/drop_database.go b/pkg/sql/drop_database.go index 18816955be23..b40fc6ba7bcc 100644 --- a/pkg/sql/drop_database.go +++ b/pkg/sql/drop_database.go @@ -76,7 +76,7 @@ func (p *planner) DropDatabase(ctx context.Context, n *tree.DropDatabase) (planN tempSchemasToDelete := make(map[ClusterWideID]struct{}) for _, schema := range schemas { toAppend, err := GetObjectNames( - ctx, p.txn, p, dbDesc, schema, true, /*explicitPrefix*/ + ctx, p.txn, p, p.ExecCfg().Codec, dbDesc, schema, true, /*explicitPrefix*/ ) if err != nil { return nil, err @@ -203,7 +203,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { tbNameStrings = append(tbNameStrings, toDel.tn.FQString()) } - descKey := sqlbase.MakeDescMetadataKey(n.dbDesc.ID) + descKey := sqlbase.MakeDescMetadataKey(p.ExecCfg().Codec, n.dbDesc.ID) b := &kv.Batch{} if p.ExtendedEvalContext().Tracing.KVTracingEnabled() { @@ -215,6 +215,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { if err := sqlbase.RemoveSchemaNamespaceEntry( ctx, p.txn, + p.ExecCfg().Codec, n.dbDesc.ID, schemaToDelete, ); err != nil { @@ -223,7 +224,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { } err := sqlbase.RemoveDatabaseNamespaceEntry( - ctx, p.txn, n.dbDesc.Name, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), + ctx, p.txn, p.ExecCfg().Codec, n.dbDesc.Name, p.ExtendedEvalContext().Tracing.KVTracingEnabled(), ) if err != nil { return err diff --git a/pkg/sql/drop_index.go b/pkg/sql/drop_index.go index babe94e3b35b..5fcd8d5291c1 100644 --- a/pkg/sql/drop_index.go +++ b/pkg/sql/drop_index.go @@ -483,7 +483,7 @@ func (p *planner) dropIndexByName( return err } - if err := tableDesc.Validate(ctx, p.txn); err != nil { + if err := tableDesc.Validate(ctx, p.txn, p.ExecCfg().Codec); err != nil { return err } mutationID := tableDesc.ClusterVersion.NextMutationID diff --git a/pkg/sql/drop_test.go b/pkg/sql/drop_test.go index 4630940b8b31..dfc913d2d82a 100644 --- a/pkg/sql/drop_test.go +++ b/pkg/sql/drop_test.go @@ -137,7 +137,7 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); t.Fatal(err) } - dbNameKey := sqlbase.NewDatabaseKey("t").Key() + dbNameKey := sqlbase.NewDatabaseKey("t").Key(keys.SystemSQLCodec) r, err := kvDB.Get(ctx, dbNameKey) if err != nil { t.Fatal(err) @@ -145,14 +145,14 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); if !r.Exists() { t.Fatalf(`database "t" does not exist`) } - dbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(r.ValueInt())) + dbDescKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(r.ValueInt())) desc := &sqlbase.Descriptor{} if err := kvDB.GetProto(ctx, dbDescKey, desc); err != nil { t.Fatal(err) } dbDesc := desc.GetDatabase() - tbNameKey := sqlbase.NewPublicTableKey(dbDesc.ID, "kv").Key() + tbNameKey := sqlbase.NewPublicTableKey(dbDesc.ID, "kv").Key(keys.SystemSQLCodec) gr, err := kvDB.Get(ctx, tbNameKey) if err != 
nil { t.Fatal(err) @@ -160,7 +160,7 @@ INSERT INTO t.kv VALUES ('c', 'e'), ('a', 'c'), ('b', 'd'); if !gr.Exists() { t.Fatalf(`table "kv" does not exist`) } - tbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt())) + tbDescKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(gr.ValueInt())) ts, err := kvDB.GetProtoTs(ctx, tbDescKey, desc) if err != nil { t.Fatal(err) @@ -260,7 +260,7 @@ CREATE DATABASE t; } dKey := sqlbase.NewDatabaseKey("t") - r, err := kvDB.Get(ctx, dKey.Key()) + r, err := kvDB.Get(ctx, dKey.Key(keys.SystemSQLCodec)) if err != nil { t.Fatal(err) } @@ -316,14 +316,14 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); } dKey := sqlbase.NewDatabaseKey("t") - r, err := kvDB.Get(ctx, dKey.Key()) + r, err := kvDB.Get(ctx, dKey.Key(keys.SystemSQLCodec)) if err != nil { t.Fatal(err) } if !r.Exists() { t.Fatalf(`database "t" does not exist`) } - dbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(r.ValueInt())) + dbDescKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(r.ValueInt())) desc := &sqlbase.Descriptor{} if err := kvDB.GetProto(ctx, dbDescKey, desc); err != nil { t.Fatal(err) @@ -331,14 +331,14 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); dbDesc := desc.GetDatabase() tKey := sqlbase.NewPublicTableKey(dbDesc.ID, "kv") - gr, err := kvDB.Get(ctx, tKey.Key()) + gr, err := kvDB.Get(ctx, tKey.Key(keys.SystemSQLCodec)) if err != nil { t.Fatal(err) } if !gr.Exists() { t.Fatalf(`table "kv" does not exist`) } - tbDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr.ValueInt())) + tbDescKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(gr.ValueInt())) ts, err := kvDB.GetProtoTs(ctx, tbDescKey, desc) if err != nil { t.Fatal(err) @@ -346,14 +346,14 @@ INSERT INTO t.kv2 VALUES ('c', 'd'), ('a', 'b'), ('e', 'a'); tbDesc := desc.Table(ts) t2Key := sqlbase.NewPublicTableKey(dbDesc.ID, "kv2") - gr2, err := kvDB.Get(ctx, t2Key.Key()) + gr2, err := kvDB.Get(ctx, t2Key.Key(keys.SystemSQLCodec)) if err != nil { t.Fatal(err) } if !gr2.Exists() { t.Fatalf(`table "kv2" does not exist`) } - tb2DescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(gr2.ValueInt())) + tb2DescKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(gr2.ValueInt())) ts, err = kvDB.GetProtoTs(ctx, tb2DescKey, desc) if err != nil { t.Fatal(err) @@ -537,7 +537,7 @@ func TestDropIndex(t *testing.T) { if err := tests.CreateKVTable(sqlDB, "kv", numRows); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") tests.CheckKeyCount(t, kvDB, tableDesc.TableSpan(keys.SystemSQLCodec), 3*numRows) idx, _, err := tableDesc.FindIndexByName("foo") if err != nil { @@ -549,7 +549,7 @@ func TestDropIndex(t *testing.T) { t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") if _, _, err := tableDesc.FindIndexByName("foo"); err == nil { t.Fatalf("table descriptor still contains index after index is dropped") } @@ -575,7 +575,7 @@ func TestDropIndex(t *testing.T) { t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") newIdx, _, err := tableDesc.FindIndexByName("foo") if err != nil { t.Fatal(err) @@ -637,7 +637,7 @@ func TestDropIndexWithZoneConfigOSS(t *testing.T) { if err := tests.CreateKVTable(sqlDBRaw, "kv", numRows); err != nil { 
t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") indexDesc, _, err := tableDesc.FindIndexByName("foo") if err != nil { t.Fatal(err) @@ -676,7 +676,7 @@ func TestDropIndexWithZoneConfigOSS(t *testing.T) { // TODO(benesch): Run scrub here. It can't currently handle the way t.kv // declares column families. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") if _, _, err := tableDesc.FindIndexByName("foo"); err == nil { t.Fatalf("table descriptor still contains index after index is dropped") } @@ -697,7 +697,7 @@ func TestDropIndexInterleaved(t *testing.T) { numRows := 2*chunkSize + 1 tests.CreateKVInterleavedTable(t, sqlDB, numRows) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") tableSpan := tableDesc.TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, 3*numRows) @@ -708,7 +708,7 @@ func TestDropIndexInterleaved(t *testing.T) { tests.CheckKeyCount(t, kvDB, tableSpan, 2*numRows) // Ensure that index is not active. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "intlv") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "intlv") if _, _, err := tableDesc.FindIndexByName("intlv_idx"); err == nil { t.Fatalf("table descriptor still contains index after index is dropped") } @@ -728,8 +728,8 @@ func TestDropTable(t *testing.T) { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") - nameKey := sqlbase.NewPublicTableKey(keys.MinNonPredefinedUserDescID, "kv").Key() + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") + nameKey := sqlbase.NewPublicTableKey(keys.MinNonPredefinedUserDescID, "kv").Key(keys.SystemSQLCodec) gr, err := kvDB.Get(ctx, nameKey) if err != nil { @@ -826,9 +826,9 @@ func TestDropTableDeleteData(t *testing.T) { t.Fatal(err) } - descs = append(descs, sqlbase.GetTableDescriptor(kvDB, "t", tableName)) + descs = append(descs, sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", tableName)) - nameKey := sqlbase.NewPublicTableKey(keys.MinNonPredefinedUserDescID, tableName).Key() + nameKey := sqlbase.NewPublicTableKey(keys.MinNonPredefinedUserDescID, tableName).Key(keys.SystemSQLCodec) gr, err := kvDB.Get(ctx, nameKey) if err != nil { t.Fatal(err) @@ -943,7 +943,7 @@ func writeTableDesc(ctx context.Context, db *kv.DB, tableDesc *sqlbase.TableDesc return err } tableDesc.ModificationTime = txn.CommitTimestamp() - return txn.Put(ctx, sqlbase.MakeDescMetadataKey(tableDesc.ID), sqlbase.WrapDescriptor(tableDesc)) + return txn.Put(ctx, sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), sqlbase.WrapDescriptor(tableDesc)) }) } @@ -978,7 +978,7 @@ func TestDropTableWhileUpgradingFormat(t *testing.T) { sqlutils.CreateTable(t, sqlDBRaw, "t", "a INT", numRows, sqlutils.ToRowFn(sqlutils.RowIdxFn)) // Give the table an old format version. 
- tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") tableDesc.FormatVersion = sqlbase.FamilyFormatVersion tableDesc.Version++ if err := writeTableDesc(ctx, kvDB, tableDesc); err != nil { @@ -993,7 +993,7 @@ func TestDropTableWhileUpgradingFormat(t *testing.T) { // Simulate a migration upgrading the table descriptor's format version after // the table has been dropped but before the truncation has occurred. var err error - tableDesc, err = sqlbase.GetTableDescFromID(ctx, kvDB.NewTxn(ctx, ""), tableDesc.ID) + tableDesc, err = sqlbase.GetTableDescFromID(ctx, kvDB.NewTxn(ctx, ""), keys.SystemSQLCodec, tableDesc.ID) if err != nil { t.Fatal(err) } @@ -1033,8 +1033,8 @@ func TestDropTableInterleavedDeleteData(t *testing.T) { numRows := 2*sql.TableTruncateChunkSize + 1 tests.CreateKVInterleavedTable(t, sqlDB, numRows) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") - tableDescInterleaved := sqlbase.GetTableDescriptor(kvDB, "t", "intlv") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") + tableDescInterleaved := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "intlv") tableSpan := tableDesc.TableSpan(keys.SystemSQLCodec) tests.CheckKeyCount(t, kvDB, tableSpan, 3*numRows) @@ -1118,7 +1118,7 @@ func TestDropDatabaseAfterDropTable(t *testing.T) { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") if _, err := sqlDB.Exec(`DROP TABLE t.kv`); err != nil { t.Fatal(err) diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index 87010a277f20..cd59f998506f 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -807,7 +807,7 @@ var _ dbCacheSubscriber = &databaseCacheHolder{} // received. 
func (dc *databaseCacheHolder) updateSystemConfig(cfg *config.SystemConfig) { dc.mu.Lock() - dc.mu.c = newDatabaseCache(cfg) + dc.mu.c = newDatabaseCache(dc.mu.c.codec, cfg) dc.mu.cv.Broadcast() dc.mu.Unlock() } diff --git a/pkg/sql/flowinfra/cluster_test.go b/pkg/sql/flowinfra/cluster_test.go index 253d02481a8a..d66b19db9131 100644 --- a/pkg/sql/flowinfra/cluster_test.go +++ b/pkg/sql/flowinfra/cluster_test.go @@ -65,7 +65,7 @@ func TestClusterFlow(t *testing.T) { sqlutils.ToRowFn(sqlutils.RowIdxFn, sumDigitsFn, sqlutils.RowEnglishFn)) kvDB := tc.Server(0).DB() - desc := sqlbase.GetTableDescriptor(kvDB, "test", "t") + desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") makeIndexSpan := func(start, end int) execinfrapb.TableReaderSpan { var span roachpb.Span prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, desc, desc.Indexes[0].ID)) diff --git a/pkg/sql/flowinfra/server_test.go b/pkg/sql/flowinfra/server_test.go index 30086dacf5c6..daf863516a65 100644 --- a/pkg/sql/flowinfra/server_test.go +++ b/pkg/sql/flowinfra/server_test.go @@ -48,7 +48,7 @@ func TestServer(t *testing.T) { r.Exec(t, `CREATE TABLE test.t (a INT PRIMARY KEY, b INT)`) r.Exec(t, `INSERT INTO test.t VALUES (1, 10), (2, 20), (3, 30)`) - td := sqlbase.GetTableDescriptor(kvDB, "test", "t") + td := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") ts := execinfrapb.TableReaderSpec{ Table: *td, diff --git a/pkg/sql/gcjob/descriptor_utils.go b/pkg/sql/gcjob/descriptor_utils.go index 3987c6e067b8..d4d7bca37d1b 100644 --- a/pkg/sql/gcjob/descriptor_utils.go +++ b/pkg/sql/gcjob/descriptor_utils.go @@ -14,6 +14,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/config" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -53,9 +54,11 @@ func updateDescriptorGCMutations( } // dropTableDesc removes a descriptor from the KV database. 
-func dropTableDesc(ctx context.Context, db *kv.DB, tableDesc *sqlbase.TableDescriptor) error { +func dropTableDesc( + ctx context.Context, db *kv.DB, codec keys.SQLCodec, tableDesc *sqlbase.TableDescriptor, +) error { log.Infof(ctx, "removing table descriptor for table %d", tableDesc.ID) - descKey := sqlbase.MakeDescMetadataKey(tableDesc.ID) + descKey := sqlbase.MakeDescMetadataKey(codec, tableDesc.ID) zoneKeyPrefix := config.MakeZoneKeyPrefix(uint32(tableDesc.ID)) return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { diff --git a/pkg/sql/gcjob/index_garbage_collection.go b/pkg/sql/gcjob/index_garbage_collection.go index 6284fdafc137..bfd836c127cf 100644 --- a/pkg/sql/gcjob/index_garbage_collection.go +++ b/pkg/sql/gcjob/index_garbage_collection.go @@ -40,7 +40,7 @@ func gcIndexes( var parentTable *sqlbase.TableDescriptor if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error - parentTable, err = sqlbase.GetTableDescFromID(ctx, txn, parentID) + parentTable, err = sqlbase.GetTableDescFromID(ctx, txn, execCfg.Codec, parentID) return err }); err != nil { return false, errors.Wrapf(err, "fetching parent table %d", parentID) diff --git a/pkg/sql/gcjob/refresh_statuses.go b/pkg/sql/gcjob/refresh_statuses.go index 91d0a29619ec..54f0074f769f 100644 --- a/pkg/sql/gcjob/refresh_statuses.go +++ b/pkg/sql/gcjob/refresh_statuses.go @@ -87,7 +87,7 @@ func updateStatusForGCElements( earliestDeadline := timeutil.Unix(0, int64(math.MaxInt64)) if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - table, err := sqlbase.GetTableDescFromID(ctx, txn, tableID) + table, err := sqlbase.GetTableDescFromID(ctx, txn, execCfg.Codec, tableID) if err != nil { return err } diff --git a/pkg/sql/gcjob/table_garbage_collection.go b/pkg/sql/gcjob/table_garbage_collection.go index 01384196f67a..8bea198f2768 100644 --- a/pkg/sql/gcjob/table_garbage_collection.go +++ b/pkg/sql/gcjob/table_garbage_collection.go @@ -45,7 +45,7 @@ func gcTables( var table *sqlbase.TableDescriptor if err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error - table, err = sqlbase.GetTableDescFromID(ctx, txn, droppedTable.ID) + table, err = sqlbase.GetTableDescFromID(ctx, txn, execCfg.Codec, droppedTable.ID) return err }); err != nil { return false, errors.Wrapf(err, "fetching table %d", droppedTable.ID) @@ -62,7 +62,7 @@ func gcTables( } // Finished deleting all the table data, now delete the table meta data. 
- if err := dropTableDesc(ctx, execCfg.DB, table); err != nil { + if err := dropTableDesc(ctx, execCfg.DB, execCfg.Codec, table); err != nil { return false, errors.Wrapf(err, "dropping table descriptor for table %d", table.ID) } diff --git a/pkg/sql/gcjob_test/gc_job_test.go b/pkg/sql/gcjob_test/gc_job_test.go index e268684001b1..dfdab5f2ba2d 100644 --- a/pkg/sql/gcjob_test/gc_job_test.go +++ b/pkg/sql/gcjob_test/gc_job_test.go @@ -81,11 +81,11 @@ func TestSchemaChangeGCJob(t *testing.T) { var myOtherTableDesc *sqlbase.TableDescriptor if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error - myTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, myTableID) + myTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, keys.SystemSQLCodec, myTableID) if err != nil { return err } - myOtherTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, myOtherTableID) + myOtherTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, keys.SystemSQLCodec, myOtherTableID) return err }); err != nil { t.Fatal(err) @@ -145,10 +145,10 @@ func TestSchemaChangeGCJob(t *testing.T) { if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() - descKey := sqlbase.MakeDescMetadataKey(myTableID) + descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, myTableID) descDesc := sqlbase.WrapDescriptor(myTableDesc) b.Put(descKey, descDesc) - descKey2 := sqlbase.MakeDescMetadataKey(myOtherTableID) + descKey2 := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, myOtherTableID) descDesc2 := sqlbase.WrapDescriptor(myOtherTableDesc) b.Put(descKey2, descDesc2) return txn.Run(ctx, b) @@ -196,13 +196,13 @@ func TestSchemaChangeGCJob(t *testing.T) { if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error - myTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, myTableID) + myTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, keys.SystemSQLCodec, myTableID) if ttlTime != FUTURE && (dropItem == TABLE || dropItem == DATABASE) { // We dropped the table, so expect it to not be found. require.EqualError(t, err, "descriptor not found") return nil } - myOtherTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, myOtherTableID) + myOtherTableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, keys.SystemSQLCodec, myOtherTableID) if ttlTime != FUTURE && dropItem == DATABASE { // We dropped the entire database, so expect none of the tables to be found. 
require.EqualError(t, err, "descriptor not found") diff --git a/pkg/sql/grant_revoke.go b/pkg/sql/grant_revoke.go index a45b00798f9e..7444018c4594 100644 --- a/pkg/sql/grant_revoke.go +++ b/pkg/sql/grant_revoke.go @@ -145,7 +145,15 @@ func (n *changePrivilegesNode) startExec(params runParams) error { if err := d.Validate(); err != nil { return err } - if err := writeDescToBatch(ctx, p.extendedEvalCtx.Tracing.KVTracingEnabled(), p.execCfg.Settings, b, descriptor.GetID(), descriptor); err != nil { + if err := writeDescToBatch( + ctx, + p.extendedEvalCtx.Tracing.KVTracingEnabled(), + p.ExecCfg().Settings, + b, + p.ExecCfg().Codec, + descriptor.GetID(), + descriptor, + ); err != nil { return err } diff --git a/pkg/sql/information_schema.go b/pkg/sql/information_schema.go index eec27e0508c4..f117fb5cdc58 100644 --- a/pkg/sql/information_schema.go +++ b/pkg/sql/information_schema.go @@ -1372,7 +1372,7 @@ https://www.postgresql.org/docs/9.5/infoschema-tables.html`, if err != nil || desc == nil { return false, err } - schemaName, err := schema.ResolveNameByID(ctx, p.txn, db.ID, desc.GetParentSchemaID()) + schemaName, err := schema.ResolveNameByID(ctx, p.txn, p.ExecCfg().Codec, db.ID, desc.GetParentSchemaID()) if err != nil { return false, err } @@ -1510,7 +1510,7 @@ func forEachDatabaseDesc( } else { // We can't just use dbContext here because we need to fetch the descriptor // with privileges from kv. - fetchedDbDesc, err := getDatabaseDescriptorsFromIDs(ctx, p.txn, []sqlbase.ID{dbContext.ID}) + fetchedDbDesc, err := getDatabaseDescriptorsFromIDs(ctx, p.txn, p.ExecCfg().Codec, []sqlbase.ID{dbContext.ID}) if err != nil { return err } diff --git a/pkg/sql/join_test.go b/pkg/sql/join_test.go index 9dbcf9bf829b..ca476225b6e5 100644 --- a/pkg/sql/join_test.go +++ b/pkg/sql/join_test.go @@ -22,7 +22,7 @@ import ( ) func newTestScanNode(kvDB *kv.DB, tableName string) (*scanNode, error) { - desc := sqlbase.GetImmutableTableDescriptor(kvDB, sqlutils.TestDB, tableName) + desc := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, tableName) p := planner{} scan := p.Scan() diff --git a/pkg/sql/lease.go b/pkg/sql/lease.go index 1f298e839e94..bccf170d30c6 100644 --- a/pkg/sql/lease.go +++ b/pkg/sql/lease.go @@ -143,6 +143,7 @@ type LeaseStore struct { clock *hlc.Clock internalExecutor sqlutil.InternalExecutor settings *cluster.Settings + codec keys.SQLCodec // group is used for all calls made to acquireNodeLease to prevent // concurrent lease acquisitions from the store. @@ -198,14 +199,14 @@ func (s LeaseStore) acquire( expiration = minExpiration.Add(int64(time.Millisecond), 0) } - tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, tableID) + tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, s.codec, tableID) if err != nil { return err } if err := FilterTableState(tableDesc); err != nil { return err } - if err := tableDesc.MaybeFillInDescriptor(ctx, txn); err != nil { + if err := tableDesc.MaybeFillInDescriptor(ctx, txn, s.codec); err != nil { return err } // Once the descriptor is set it is immutable and care must be taken @@ -317,7 +318,7 @@ func (s LeaseStore) WaitForOneVersion( // Get the current version of the table descriptor non-transactionally. // // TODO(pmattis): Do an inconsistent read here? 
- tableDesc, err = sqlbase.GetTableDescFromID(ctx, s.db, tableID) + tableDesc, err = sqlbase.GetTableDescFromID(ctx, s.db, s.codec, tableID) if err != nil { return 0, err } @@ -391,7 +392,7 @@ func (s LeaseStore) PublishMultiple( // Re-read the current versions of the table descriptor, this time // transactionally. var err error - descsToUpdate[id], err = sqlbase.GetMutableTableDescFromID(ctx, txn, id) + descsToUpdate[id], err = sqlbase.GetMutableTableDescFromID(ctx, txn, s.codec, id) if err != nil { return err } @@ -435,7 +436,7 @@ func (s LeaseStore) PublishMultiple( b := txn.NewBatch() for tableID, tableDesc := range tableDescs { - if err := writeDescToBatch(ctx, false /* kvTrace */, s.settings, b, tableID, tableDesc.TableDesc()); err != nil { + if err := writeDescToBatch(ctx, false /* kvTrace */, s.settings, b, s.codec, tableID, tableDesc.TableDesc()); err != nil { return err } } @@ -568,7 +569,7 @@ func (s LeaseStore) getForExpiration( ) (*tableVersionState, error) { var table *tableVersionState err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - descKey := sqlbase.MakeDescMetadataKey(id) + descKey := sqlbase.MakeDescMetadataKey(s.codec, id) prevTimestamp := expiration.Prev() txn.SetFixedTimestamp(ctx, prevTimestamp) var desc sqlbase.Descriptor @@ -583,7 +584,7 @@ func (s LeaseStore) getForExpiration( if prevTimestamp.LessEq(tableDesc.ModificationTime) { return errors.AssertionFailedf("unable to read table= (%d, %s)", id, expiration) } - if err := tableDesc.MaybeFillInDescriptor(ctx, txn); err != nil { + if err := tableDesc.MaybeFillInDescriptor(ctx, txn, s.codec); err != nil { return err } // Create a tableVersionState with the table and without a lease. @@ -1431,6 +1432,7 @@ func NewLeaseManager( clock *hlc.Clock, internalExecutor sqlutil.InternalExecutor, settings *cluster.Settings, + codec keys.SQLCodec, testingKnobs LeaseManagerTestingKnobs, stopper *stop.Stopper, cfg *base.LeaseManagerConfig, @@ -1442,6 +1444,7 @@ func NewLeaseManager( clock: clock, internalExecutor: internalExecutor, settings: settings, + codec: codec, group: &singleflight.Group{}, leaseDuration: cfg.TableDescriptorLeaseDuration, leaseJitterFraction: cfg.TableDescriptorLeaseJitterFraction, @@ -1622,7 +1625,7 @@ func (m *LeaseManager) resolveName( txn.SetFixedTimestamp(ctx, timestamp) var found bool var err error - found, id, err = sqlbase.LookupObjectID(ctx, txn, dbID, schemaID, tableName) + found, id, err = sqlbase.LookupObjectID(ctx, txn, m.codec, dbID, schemaID, tableName) if err != nil { return err } @@ -1773,7 +1776,7 @@ func (m *LeaseManager) findTableState(tableID sqlbase.ID, create bool) *tableSta func (m *LeaseManager) RefreshLeases(s *stop.Stopper, db *kv.DB, g *gossip.Gossip) { ctx := context.TODO() s.RunWorker(ctx, func(ctx context.Context) { - descKeyPrefix := keys.TODOSQLCodec.TablePrefix(uint32(sqlbase.DescriptorTable.ID)) + descKeyPrefix := m.codec.TablePrefix(uint32(sqlbase.DescriptorTable.ID)) cfgFilter := gossip.MakeSystemConfigDeltaFilter(descKeyPrefix) gossipUpdateC := g.RegisterSystemConfigChannel() for { @@ -1803,7 +1806,7 @@ func (m *LeaseManager) RefreshLeases(s *stop.Stopper, db *kv.DB, g *gossip.Gossi // Note that we don't need to "fill in" the descriptor here. Nobody // actually reads the table, but it's necessary for the call to // ValidateTable(). 
- if err := table.MaybeFillInDescriptor(ctx, nil); err != nil { + if err := table.MaybeFillInDescriptor(ctx, nil, m.codec); err != nil { log.Warningf(ctx, "%s: unable to fill in table descriptor %v", kv.Key, table) return } diff --git a/pkg/sql/lease_internal_test.go b/pkg/sql/lease_internal_test.go index 3054ba7e25aa..3ad8530b1426 100644 --- a/pkg/sql/lease_internal_test.go +++ b/pkg/sql/lease_internal_test.go @@ -148,7 +148,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") var tables []sqlbase.ImmutableTableDescriptor var expiration hlc.Timestamp @@ -271,7 +271,7 @@ CREATE TEMP TABLE t2 (temp int); } for _, tableName := range []string{"t", "t2"} { - tableDesc := sqlbase.GetTableDescriptor(kvDB, "defaultdb", tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "defaultdb", tableName) lease := leaseManager.tableNames.get( tableDesc.ParentID, sqlbase.ID(keys.PublicSchemaID), @@ -304,7 +304,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Rename. if _, err := db.Exec("ALTER TABLE t.test RENAME TO t.test2;"); err != nil { @@ -333,7 +333,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); } // Re-read the descriptor, to get the new ParentID. - newTableDesc := sqlbase.GetTableDescriptor(kvDB, "t1", "test2") + newTableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t1", "test2") if tableDesc.ParentID == newTableDesc.ParentID { t.Fatalf("database didn't change") } @@ -376,7 +376,7 @@ CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", tableName) // Check the assumptions this tests makes: that there is a cache entry // (with a valid lease). @@ -421,7 +421,7 @@ CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", tableName) // Populate the name cache. if _, err := db.Exec("SELECT * FROM t.test;"); err != nil { @@ -483,7 +483,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Check that we cannot get the table by a different name. if leaseManager.tableNames.get(tableDesc.ParentID, tableDesc.GetParentSchemaID(), "tEsT", s.Clock().Now()) != nil { @@ -519,7 +519,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Populate the name cache. 
ctx := context.TODO() @@ -635,7 +635,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") var wg sync.WaitGroup numRoutines := 10 @@ -687,7 +687,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") var wg sync.WaitGroup numRoutines := 10 diff --git a/pkg/sql/lease_test.go b/pkg/sql/lease_test.go index 7a3ba6d6f166..d710606b7cad 100644 --- a/pkg/sql/lease_test.go +++ b/pkg/sql/lease_test.go @@ -211,6 +211,7 @@ func (t *leaseTest) node(nodeID uint32) *sql.LeaseManager { cfgCpy.Clock, cfgCpy.InternalExecutor, cfgCpy.Settings, + cfgCpy.Codec, t.leaseManagerTestingKnobs, t.server.Stopper(), t.cfg, @@ -559,7 +560,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); } // Make sure we can't get a lease on the descriptor. - tableDesc := sqlbase.GetTableDescriptor(t.kvDB, "test", "t") + tableDesc := sqlbase.GetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "test", "t") // try to acquire at a bogus version to make sure we don't get back a lease we // already had. _, _, err = t.acquireMinVersion(1, tableDesc.ID, tableDesc.Version+1) @@ -569,7 +570,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); } func isDeleted(tableID sqlbase.ID, cfg *config.SystemConfig) bool { - descKey := sqlbase.MakeDescMetadataKey(tableID) + descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableID) val := cfg.GetValue(descKey) if val == nil { return false @@ -636,7 +637,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") ctx := context.TODO() lease1, _, err := acquire(ctx, s.(*server.TestServer), tableDesc.ID) @@ -735,7 +736,7 @@ CREATE TABLE t.foo (v INT); t.Fatalf("CREATE TABLE has acquired a lease: got %d, expected 0", atomic.LoadInt32(&fooAcquiredCount)) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "foo") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "foo") atomic.StoreInt64(&tableID, int64(tableDesc.ID)) if _, err := sqlDB.Exec(` @@ -862,7 +863,7 @@ CREATE TABLE t.foo (v INT); t.Fatalf("CREATE TABLE has acquired a descriptor") } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "foo") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "foo") atomic.StoreInt64(&tableID, int64(tableDesc.ID)) tx, err := sqlDB.Begin() @@ -927,7 +928,7 @@ INSERT INTO t.kv VALUES ('a', 'b'); `); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") // A read-write transaction that uses the old version of the descriptor. 
txReadWrite, err := sqlDB.Begin() @@ -1097,7 +1098,7 @@ INSERT INTO t.kv VALUES ('a', 'b'); } testutils.SucceedsSoon(t, func() error { - if tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv"); len(tableDesc.GCMutations) != 0 { + if tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv"); len(tableDesc.GCMutations) != 0 { return errors.Errorf("%d gc mutations remaining", len(tableDesc.GCMutations)) } return nil @@ -1145,7 +1146,7 @@ COMMIT; t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") tx, err := sqlDB.Begin() if err != nil { @@ -1207,7 +1208,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(t.kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test") dbID := tableDesc.ParentID tableName := tableDesc.Name leaseManager := t.node(1) @@ -1291,8 +1292,8 @@ CREATE TABLE t.test2 (); t.Fatal(err) } - test1Desc := sqlbase.GetTableDescriptor(t.kvDB, "t", "test1") - test2Desc := sqlbase.GetTableDescriptor(t.kvDB, "t", "test2") + test1Desc := sqlbase.GetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test1") + test2Desc := sqlbase.GetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test2") dbID := test2Desc.ParentID // Acquire a lease on test1 by name. @@ -1425,7 +1426,7 @@ CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") if tableDesc.Version != 1 { t.Fatalf("invalid version %d", tableDesc.Version) } @@ -1446,7 +1447,7 @@ CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR); } // The first schema change will succeed and increment the version. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") if tableDesc.Version != 2 { t.Fatalf("invalid version %d", tableDesc.Version) } @@ -1476,7 +1477,7 @@ CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR); // the table descriptor. If the schema change transaction // doesn't rollback the transaction this descriptor read will // hang. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") if tableDesc.Version != 2 { t.Fatalf("invalid version %d", tableDesc.Version) } @@ -1487,7 +1488,7 @@ CREATE TABLE t.kv (k CHAR PRIMARY KEY, v CHAR); } wg.Wait() - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") if tableDesc.Version != 3 { t.Fatalf("invalid version %d", tableDesc.Version) } @@ -1528,7 +1529,7 @@ INSERT INTO t.kv VALUES ('a', 'b'); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") if tableDesc.Version != 1 { t.Fatalf("invalid version %d", tableDesc.Version) } @@ -1667,7 +1668,7 @@ CREATE TABLE t.test0 (k CHAR PRIMARY KEY, v CHAR); txn.SetFixedTimestamp(ctx, table.ModificationTime) // Look up the descriptor. 
- descKey := sqlbase.MakeDescMetadataKey(descID) + descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, descID) dbDesc := &sqlbase.Descriptor{} ts, err := txn.GetProtoTs(ctx, descKey, dbDesc) if err != nil { @@ -1748,8 +1749,8 @@ CREATE TABLE t.test2 (); t.Fatal(err) } - test1Desc := sqlbase.GetTableDescriptor(t.kvDB, "t", "test2") - test2Desc := sqlbase.GetTableDescriptor(t.kvDB, "t", "test2") + test1Desc := sqlbase.GetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test2") + test2Desc := sqlbase.GetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "test2") dbID := test2Desc.ParentID atomic.StoreInt32(&testAcquisitionBlockCount, 0) @@ -1945,8 +1946,8 @@ CREATE TABLE t.after (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - beforeDesc := sqlbase.GetTableDescriptor(t.kvDB, "t", "before") - afterDesc := sqlbase.GetTableDescriptor(t.kvDB, "t", "after") + beforeDesc := sqlbase.GetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "before") + afterDesc := sqlbase.GetTableDescriptor(t.kvDB, keys.SystemSQLCodec, "t", "after") dbID := beforeDesc.ParentID // Acquire a lease on "before" by name. diff --git a/pkg/sql/logical_schema_accessors.go b/pkg/sql/logical_schema_accessors.go index cfd012fa3e3f..2df00e0c1ca5 100644 --- a/pkg/sql/logical_schema_accessors.go +++ b/pkg/sql/logical_schema_accessors.go @@ -13,6 +13,7 @@ package sql import ( "context" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -35,20 +36,21 @@ var _ SchemaAccessor = &LogicalSchemaAccessor{} // IsValidSchema implements the DatabaseLister interface. func (l *LogicalSchemaAccessor) IsValidSchema( - ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, scName string, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbID sqlbase.ID, scName string, ) (bool, sqlbase.ID, error) { if _, ok := l.vt.getVirtualSchemaEntry(scName); ok { return true, sqlbase.InvalidID, nil } // Fallthrough. - return l.SchemaAccessor.IsValidSchema(ctx, txn, dbID, scName) + return l.SchemaAccessor.IsValidSchema(ctx, txn, codec, dbID, scName) } // GetObjectNames implements the DatabaseLister interface. func (l *LogicalSchemaAccessor) GetObjectNames( ctx context.Context, txn *kv.Txn, + codec keys.SQLCodec, dbDesc *DatabaseDescriptor, scName string, flags tree.DatabaseListFlags, @@ -66,7 +68,7 @@ func (l *LogicalSchemaAccessor) GetObjectNames( } // Fallthrough. - return l.SchemaAccessor.GetObjectNames(ctx, txn, dbDesc, scName, flags) + return l.SchemaAccessor.GetObjectNames(ctx, txn, codec, dbDesc, scName, flags) } // GetObjectDesc implements the ObjectAccessor interface. @@ -74,6 +76,7 @@ func (l *LogicalSchemaAccessor) GetObjectDesc( ctx context.Context, txn *kv.Txn, settings *cluster.Settings, + codec keys.SQLCodec, name *ObjectName, flags tree.ObjectLookupFlags, ) (ObjectDescriptor, error) { @@ -97,5 +100,5 @@ func (l *LogicalSchemaAccessor) GetObjectDesc( } // Fallthrough. - return l.SchemaAccessor.GetObjectDesc(ctx, txn, settings, name, flags) + return l.SchemaAccessor.GetObjectDesc(ctx, txn, settings, codec, name, flags) } diff --git a/pkg/sql/namespace_test.go b/pkg/sql/namespace_test.go index c1d627d27185..d10fbb85a423 100644 --- a/pkg/sql/namespace_test.go +++ b/pkg/sql/namespace_test.go @@ -39,7 +39,7 @@ func TestNamespaceTableSemantics(t *testing.T) { idCounter := keys.MinNonPredefinedUserDescID // Database name. 
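(Illustrative aside, not part of the patch.) The test files take the other branch of the same change: they run against an ordinary single-tenant TestServer, so they pass keys.SystemSQLCodec explicitly rather than a config-supplied codec. A sketch of that idiom, borrowing the t/test names from the tests above; rewriteDescriptor is a hypothetical helper:

    func rewriteDescriptor(ctx context.Context, t *testing.T, kvDB *kv.DB) {
        // System codec is correct here because the test server is not a tenant.
        tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test")
        descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID)
        if err := kvDB.Put(ctx, descKey, sqlbase.WrapDescriptor(tableDesc)); err != nil {
            t.Fatal(err)
        }
    }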
- dKey := sqlbase.NewDeprecatedDatabaseKey("test").Key() + dKey := sqlbase.NewDeprecatedDatabaseKey("test").Key(keys.SystemSQLCodec) if gr, err := kvDB.Get(ctx, dKey); err != nil { t.Fatal(err) } else if gr.Exists() { @@ -86,7 +86,7 @@ func TestNamespaceTableSemantics(t *testing.T) { } else if gr.Exists() { t.Fatal("database key unexpectedly found in the deprecated system.namespace") } - newDKey := sqlbase.NewDatabaseKey("test").Key() + newDKey := sqlbase.NewDatabaseKey("test").Key(keys.SystemSQLCodec) if gr, err := kvDB.Get(ctx, newDKey); err != nil { t.Fatal(err) } else if !gr.Exists() { @@ -94,7 +94,7 @@ func TestNamespaceTableSemantics(t *testing.T) { } txn := kvDB.NewTxn(ctx, "lookup-test-db-id") - found, dbID, err := sqlbase.LookupDatabaseID(ctx, txn, "test") + found, dbID, err := sqlbase.LookupDatabaseID(ctx, txn, keys.SystemSQLCodec, "test") if err != nil { t.Fatal(err) } @@ -103,7 +103,7 @@ func TestNamespaceTableSemantics(t *testing.T) { } // Simulate the same test for a table and sequence. - tKey := sqlbase.NewDeprecatedTableKey(dbID, "rel").Key() + tKey := sqlbase.NewDeprecatedTableKey(dbID, "rel").Key(keys.SystemSQLCodec) if err := kvDB.CPut(ctx, tKey, idCounter, nil); err != nil { t.Fatal(err) } @@ -160,7 +160,7 @@ func TestNamespaceTableSemantics(t *testing.T) { } else if gr.Exists() { t.Fatal("table key unexpectedly found in the deprecated system.namespace") } - newTKey := sqlbase.NewPublicTableKey(dbID, "rel").Key() + newTKey := sqlbase.NewPublicTableKey(dbID, "rel").Key(keys.SystemSQLCodec) if gr, err := kvDB.Get(ctx, newTKey); err != nil { t.Fatal(err) } else if !gr.Exists() { diff --git a/pkg/sql/old_foreign_key_desc_test.go b/pkg/sql/old_foreign_key_desc_test.go index 3bfc8ecacc9a..fc0a8a0f0409 100644 --- a/pkg/sql/old_foreign_key_desc_test.go +++ b/pkg/sql/old_foreign_key_desc_test.go @@ -15,6 +15,7 @@ import ( "reflect" "testing" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -44,8 +45,8 @@ CREATE INDEX ON t.t1 (x); `); err != nil { t.Fatal(err) } - desc := sqlbase.GetTableDescriptor(kvDB, "t", "t1") - desc = sqlbase.GetTableDescriptor(kvDB, "t", "t2") + desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "t1") + desc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "t2") // Remember the old foreign keys. oldInboundFKs := append([]sqlbase.ForeignKeyConstraint{}, desc.InboundFKs...) 
// downgradeForeignKey downgrades a table descriptor's foreign key representation @@ -59,7 +60,7 @@ CREATE INDEX ON t.t1 (x); if err != nil { t.Fatal(err) } - referencedTbl, err := sqlbase.GetTableDescFromID(ctx, kvDB, fk.ReferencedTableID) + referencedTbl, err := sqlbase.GetTableDescFromID(ctx, kvDB, keys.SystemSQLCodec, fk.ReferencedTableID) if err != nil { t.Fatal(err) } @@ -86,7 +87,7 @@ CREATE INDEX ON t.t1 (x); if err != nil { t.Fatal(err) } - originTbl, err := sqlbase.GetTableDescFromID(ctx, kvDB, fk.OriginTableID) + originTbl, err := sqlbase.GetTableDescFromID(ctx, kvDB, keys.SystemSQLCodec, fk.OriginTableID) if err != nil { t.Fatal(err) } @@ -107,7 +108,7 @@ CREATE INDEX ON t.t1 (x); err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() newDesc := downgradeForeignKey(desc) - if err := writeDescToBatch(ctx, false, s.ClusterSettings(), b, desc.ID, newDesc); err != nil { + if err := writeDescToBatch(ctx, false, s.ClusterSettings(), b, keys.SystemSQLCodec, desc.ID, newDesc); err != nil { return err } return txn.Run(ctx, b) @@ -119,7 +120,7 @@ CREATE INDEX ON t.t1 (x); if _, err := sqlDB.Exec(`DROP INDEX t.t1@t1_auto_index_fk1`); err != nil { t.Fatal(err) } - desc = sqlbase.GetTableDescriptor(kvDB, "t", "t2") + desc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "t2") // Remove the validity field on all the descriptors for comparison, since // foreign keys on the referenced side's validity is not always updated correctly. for i := range desc.InboundFKs { diff --git a/pkg/sql/opt_catalog.go b/pkg/sql/opt_catalog.go index ffe6fdfa9ae6..9341ba43dfe8 100644 --- a/pkg/sql/opt_catalog.go +++ b/pkg/sql/opt_catalog.go @@ -114,8 +114,10 @@ func (os *optSchema) Name() *cat.SchemaName { // GetDataSourceNames is part of the cat.Schema interface. func (os *optSchema) GetDataSourceNames(ctx context.Context) ([]cat.DataSourceName, error) { return GetObjectNames( - ctx, os.planner.Txn(), + ctx, + os.planner.Txn(), os.planner, + os.planner.ExecCfg().Codec, os.desc, os.name.Schema(), true, /* explicitPrefix */ @@ -290,7 +292,7 @@ func (oc *optCatalog) fullyQualifiedNameWithTxn( } dbID := desc.ParentID - dbDesc, err := sqlbase.GetDatabaseDescFromID(ctx, txn, dbID) + dbDesc, err := sqlbase.GetDatabaseDescFromID(ctx, txn, oc.codec(), dbID) if err != nil { return cat.DataSourceName{}, err } @@ -370,7 +372,7 @@ func (oc *optCatalog) dataSourceForTable( return ds, nil } - ds, err := newOptTable(desc, oc.planner.ExecCfg().Codec, tableStats, zoneConfig) + ds, err := newOptTable(desc, oc.codec(), tableStats, zoneConfig) if err != nil { return nil, err } @@ -404,6 +406,10 @@ func (oc *optCatalog) getZoneConfig( return zone, err } +func (oc *optCatalog) codec() keys.SQLCodec { + return oc.planner.ExecCfg().Codec +} + // optView is a wrapper around sqlbase.ImmutableTableDescriptor that implements // the cat.Object, cat.DataSource, and cat.View interfaces. 
type optView struct { diff --git a/pkg/sql/partition_test.go b/pkg/sql/partition_test.go index a373f38cb02e..19b4895a701f 100644 --- a/pkg/sql/partition_test.go +++ b/pkg/sql/partition_test.go @@ -15,6 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -40,8 +41,8 @@ func TestRemovePartitioningOSS(t *testing.T) { if err := tests.CreateKVTable(sqlDBRaw, "kv", numRows); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "kv") - tableKey := sqlbase.MakeDescMetadataKey(tableDesc.ID) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "kv") + tableKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID) // Hack in partitions. Doing this properly requires a CCL binary. tableDesc.PrimaryIndex.Partitioning = sqlbase.PartitioningDescriptor{ diff --git a/pkg/sql/pg_catalog.go b/pkg/sql/pg_catalog.go index 402f7520403b..c228b034452a 100644 --- a/pkg/sql/pg_catalog.go +++ b/pkg/sql/pg_catalog.go @@ -1053,7 +1053,7 @@ func makeAllRelationsVirtualTableWithDescriptorIDIndex( } h := makeOidHasher() resolver := oneAtATimeSchemaResolver{p: p, ctx: ctx} - scName, err := schema.ResolveNameByID(ctx, p.txn, db.ID, table.Desc.GetParentSchemaID()) + scName, err := schema.ResolveNameByID(ctx, p.txn, p.ExecCfg().Codec, db.ID, table.Desc.GetParentSchemaID()) if err != nil { return false, err } diff --git a/pkg/sql/pgwire_internal_test.go b/pkg/sql/pgwire_internal_test.go index c4498b307777..bceaaf1364c8 100644 --- a/pkg/sql/pgwire_internal_test.go +++ b/pkg/sql/pgwire_internal_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -62,7 +63,7 @@ func TestPGWireConnectionCloseReleasesLeases(t *testing.T) { t.Fatal(err) } // Verify that there are no leases held. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") lm := s.LeaseManager().(*LeaseManager) // Looking for a table state validates that there used to be a lease on the // table. diff --git a/pkg/sql/physical_schema_accessors.go b/pkg/sql/physical_schema_accessors.go index 415cd77e5775..b8db0a5ef1f9 100644 --- a/pkg/sql/physical_schema_accessors.go +++ b/pkg/sql/physical_schema_accessors.go @@ -14,6 +14,7 @@ import ( "bytes" "context" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -46,7 +47,11 @@ var _ SchemaAccessor = UncachedPhysicalAccessor{} // GetDatabaseDesc implements the SchemaAccessor interface. 
func (a UncachedPhysicalAccessor) GetDatabaseDesc( - ctx context.Context, txn *kv.Txn, name string, flags tree.DatabaseLookupFlags, + ctx context.Context, + txn *kv.Txn, + codec keys.SQLCodec, + name string, + flags tree.DatabaseLookupFlags, ) (desc *DatabaseDescriptor, err error) { if name == sqlbase.SystemDB.Name { // We can't return a direct reference to SystemDB, because the @@ -55,7 +60,7 @@ func (a UncachedPhysicalAccessor) GetDatabaseDesc( return &sysDB, nil } - found, descID, err := sqlbase.LookupDatabaseID(ctx, txn, name) + found, descID, err := sqlbase.LookupDatabaseID(ctx, txn, codec, name) if err != nil { return nil, err } else if !found { @@ -66,7 +71,7 @@ func (a UncachedPhysicalAccessor) GetDatabaseDesc( } desc = &sqlbase.DatabaseDescriptor{} - if err := getDescriptorByID(ctx, txn, descID, desc); err != nil { + if err := getDescriptorByID(ctx, txn, codec, descID, desc); err != nil { return nil, err } @@ -75,20 +80,21 @@ func (a UncachedPhysicalAccessor) GetDatabaseDesc( // IsValidSchema implements the SchemaAccessor interface. func (a UncachedPhysicalAccessor) IsValidSchema( - ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, scName string, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbID sqlbase.ID, scName string, ) (bool, sqlbase.ID, error) { - return resolveSchemaID(ctx, txn, dbID, scName) + return resolveSchemaID(ctx, txn, codec, dbID, scName) } // GetObjectNames implements the SchemaAccessor interface. func (a UncachedPhysicalAccessor) GetObjectNames( ctx context.Context, txn *kv.Txn, + codec keys.SQLCodec, dbDesc *DatabaseDescriptor, scName string, flags tree.DatabaseListFlags, ) (TableNames, error) { - ok, schemaID, err := a.IsValidSchema(ctx, txn, dbDesc.ID, scName) + ok, schemaID, err := a.IsValidSchema(ctx, txn, codec, dbDesc.ID, scName) if err != nil { return nil, err } @@ -101,7 +107,7 @@ func (a UncachedPhysicalAccessor) GetObjectNames( } log.Eventf(ctx, "fetching list of objects for %q", dbDesc.Name) - prefix := sqlbase.NewTableKey(dbDesc.ID, schemaID, "").Key() + prefix := sqlbase.NewTableKey(dbDesc.ID, schemaID, "").Key(codec) sr, err := txn.Scan(ctx, prefix, prefix.PrefixEnd(), 0) if err != nil { return nil, err @@ -124,7 +130,7 @@ func (a UncachedPhysicalAccessor) GetObjectNames( // will only be present in the older system.namespace. To account for this // scenario, we must do this filtering logic. // TODO(solon): This complexity can be removed in 20.2. - dprefix := sqlbase.NewDeprecatedTableKey(dbDesc.ID, "").Key() + dprefix := sqlbase.NewDeprecatedTableKey(dbDesc.ID, "").Key(codec) dsr, err := txn.Scan(ctx, dprefix, dprefix.PrefixEnd(), 0) if err != nil { return nil, err @@ -169,17 +175,18 @@ func (a UncachedPhysicalAccessor) GetObjectDesc( ctx context.Context, txn *kv.Txn, settings *cluster.Settings, + codec keys.SQLCodec, name *ObjectName, flags tree.ObjectLookupFlags, ) (ObjectDescriptor, error) { // Look up the database ID. - dbID, err := getDatabaseID(ctx, txn, name.Catalog(), flags.Required) + dbID, err := getDatabaseID(ctx, txn, codec, name.Catalog(), flags.Required) if err != nil || dbID == sqlbase.InvalidID { // dbID can still be invalid if required is false and the database is not found. 
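(Illustrative aside, not part of the patch.) The accessor hunks above key their system.namespace scans through the caller-supplied codec, so the same lookup works against any tenant's keyspace. A sketch of the keying pattern; listSchemaObjects is a hypothetical helper:

    func listSchemaObjects(
        ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbID, schemaID sqlbase.ID,
    ) (int, error) {
        // Empty object name yields the prefix covering every name in (dbID, schemaID).
        prefix := sqlbase.NewTableKey(dbID, schemaID, "").Key(codec)
        kvs, err := txn.Scan(ctx, prefix, prefix.PrefixEnd(), 0 /* maxRows */)
        if err != nil {
            return 0, err
        }
        // Every key under the prefix is one (database, schema, name) entry in
        // system.namespace for this codec's keyspace.
        return len(kvs), nil
    }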
return nil, err } - ok, schemaID, err := a.IsValidSchema(ctx, txn, dbID, name.Schema()) + ok, schemaID, err := a.IsValidSchema(ctx, txn, codec, dbID, name.Schema()) if err != nil { return nil, err } @@ -197,7 +204,7 @@ func (a UncachedPhysicalAccessor) GetObjectDesc( descID := sqlbase.LookupSystemTableDescriptorID(ctx, settings, dbID, name.Table()) if descID == sqlbase.InvalidID { var found bool - found, descID, err = sqlbase.LookupObjectID(ctx, txn, dbID, schemaID, name.Table()) + found, descID, err = sqlbase.LookupObjectID(ctx, txn, codec, dbID, schemaID, name.Table()) if err != nil { return nil, err } @@ -212,7 +219,7 @@ func (a UncachedPhysicalAccessor) GetObjectDesc( // Look up the table using the discovered database descriptor. desc := &sqlbase.TableDescriptor{} - err = getDescriptorByID(ctx, txn, descID, desc) + err = getDescriptorByID(ctx, txn, codec, descID, desc) if err != nil { return nil, err } @@ -255,7 +262,11 @@ var _ SchemaAccessor = &CachedPhysicalAccessor{} // GetDatabaseDesc implements the SchemaAccessor interface. func (a *CachedPhysicalAccessor) GetDatabaseDesc( - ctx context.Context, txn *kv.Txn, name string, flags tree.DatabaseLookupFlags, + ctx context.Context, + txn *kv.Txn, + codec keys.SQLCodec, + name string, + flags tree.DatabaseLookupFlags, ) (desc *DatabaseDescriptor, err error) { isSystemDB := name == sqlbase.SystemDB.Name if !(flags.AvoidCached || isSystemDB || testDisableTableLeases) { @@ -280,12 +291,12 @@ func (a *CachedPhysicalAccessor) GetDatabaseDesc( } // We avoided the cache. Go lower. - return a.SchemaAccessor.GetDatabaseDesc(ctx, txn, name, flags) + return a.SchemaAccessor.GetDatabaseDesc(ctx, txn, codec, name, flags) } // IsValidSchema implements the SchemaAccessor interface. func (a *CachedPhysicalAccessor) IsValidSchema( - ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, scName string, + ctx context.Context, txn *kv.Txn, _ keys.SQLCodec, dbID sqlbase.ID, scName string, ) (bool, sqlbase.ID, error) { return a.tc.resolveSchemaID(ctx, txn, dbID, scName) } @@ -295,6 +306,7 @@ func (a *CachedPhysicalAccessor) GetObjectDesc( ctx context.Context, txn *kv.Txn, settings *cluster.Settings, + codec keys.SQLCodec, name *ObjectName, flags tree.ObjectLookupFlags, ) (ObjectDescriptor, error) { diff --git a/pkg/sql/physicalplan/aggregator_funcs_test.go b/pkg/sql/physicalplan/aggregator_funcs_test.go index 7dc2006a009f..c468583ddd9e 100644 --- a/pkg/sql/physicalplan/aggregator_funcs_test.go +++ b/pkg/sql/physicalplan/aggregator_funcs_test.go @@ -18,6 +18,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/distsql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -436,7 +437,7 @@ func TestDistAggregationTable(t *testing.T) { ) kvDB := tc.Server(0).DB() - desc := sqlbase.GetTableDescriptor(kvDB, "test", "t") + desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") for fn, info := range DistAggregationTable { if fn == execinfrapb.AggregatorSpec_ANY_NOT_NULL { diff --git a/pkg/sql/physicalplan/fake_span_resolver_test.go b/pkg/sql/physicalplan/fake_span_resolver_test.go index 60ff3182e5bd..ddad17ca282b 100644 --- a/pkg/sql/physicalplan/fake_span_resolver_test.go +++ b/pkg/sql/physicalplan/fake_span_resolver_test.go @@ -53,7 +53,7 @@ func TestFakeSpanResolver(t *testing.T) { txn := kv.NewTxn(ctx, db, tc.Server(0).NodeID()) it := resolver.NewSpanResolverIterator(txn) - tableDesc := 
sqlbase.GetTableDescriptor(db, "test", "t") + tableDesc := sqlbase.GetTableDescriptor(db, keys.SystemSQLCodec, "test", "t") primIdxValDirs := sqlbase.IndexKeyValDirs(&tableDesc.PrimaryIndex) span := tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec) diff --git a/pkg/sql/physicalplan/span_resolver_test.go b/pkg/sql/physicalplan/span_resolver_test.go index 31fbee285922..22d4f731baef 100644 --- a/pkg/sql/physicalplan/span_resolver_test.go +++ b/pkg/sql/physicalplan/span_resolver_test.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -327,7 +328,7 @@ func setupRanges( } } - tableDesc := sqlbase.GetTableDescriptor(cdb, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(cdb, keys.SystemSQLCodec, "t", "test") // Split every SQL row to its own range. rowRanges := make([]roachpb.RangeDescriptor, len(values)) for i, val := range values { diff --git a/pkg/sql/privileged_accessor.go b/pkg/sql/privileged_accessor.go index fccd3e09c069..3e92ea331265 100644 --- a/pkg/sql/privileged_accessor.go +++ b/pkg/sql/privileged_accessor.go @@ -98,7 +98,7 @@ func (p *planner) LookupZoneConfigByNamespaceID( // to check the permissions of a descriptor given its ID, or the id given // is not a descriptor of a table or database. func (p *planner) checkDescriptorPermissions(ctx context.Context, id sqlbase.ID) error { - desc, found, err := lookupDescriptorByID(ctx, p.txn, id) + desc, found, err := lookupDescriptorByID(ctx, p.txn, p.ExecCfg().Codec, id) if err != nil { return err } diff --git a/pkg/sql/privileged_accessor_test.go b/pkg/sql/privileged_accessor_test.go index b1a695c80dee..7bd30057546a 100644 --- a/pkg/sql/privileged_accessor_test.go +++ b/pkg/sql/privileged_accessor_test.go @@ -14,6 +14,7 @@ import ( "context" "testing" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -36,7 +37,7 @@ func TestLookupNamespaceIDFallback(t *testing.T) { err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return txn.Put( ctx, - sqlbase.NewDeprecatedTableKey(999, "bob").Key(), + sqlbase.NewDeprecatedTableKey(999, "bob").Key(keys.SystemSQLCodec), 9999, ) }) diff --git a/pkg/sql/rename_column.go b/pkg/sql/rename_column.go index 530aaf565b1f..810606a51733 100644 --- a/pkg/sql/rename_column.go +++ b/pkg/sql/rename_column.go @@ -71,7 +71,7 @@ func (n *renameColumnNode) startExec(params runParams) error { return nil } - if err := tableDesc.Validate(ctx, p.txn); err != nil { + if err := tableDesc.Validate(ctx, p.txn, p.ExecCfg().Codec); err != nil { return err } diff --git a/pkg/sql/rename_database.go b/pkg/sql/rename_database.go index a76e325deee5..cd15c1a95895 100644 --- a/pkg/sql/rename_database.go +++ b/pkg/sql/rename_database.go @@ -94,6 +94,7 @@ func (n *renameDatabaseNode) startExec(params runParams) error { tbNames, err := phyAccessor.GetObjectNames( ctx, p.txn, + p.ExecCfg().Codec, dbDesc, schema, tree.DatabaseListFlags{ @@ -106,8 +107,14 @@ func (n *renameDatabaseNode) startExec(params runParams) error { } lookupFlags.Required = false for i := range tbNames { - objDesc, err := phyAccessor.GetObjectDesc(ctx, p.txn, p.ExecCfg().Settings, - &tbNames[i], tree.ObjectLookupFlags{CommonLookupFlags: lookupFlags}) + 
objDesc, err := phyAccessor.GetObjectDesc( + ctx, + p.txn, + p.ExecCfg().Settings, + p.ExecCfg().Codec, + &tbNames[i], + tree.ObjectLookupFlags{CommonLookupFlags: lookupFlags}, + ) if err != nil { return err } @@ -116,7 +123,7 @@ func (n *renameDatabaseNode) startExec(params runParams) error { } tbDesc := objDesc.TableDesc() for _, dependedOn := range tbDesc.DependedOnBy { - dependentDesc, err := sqlbase.GetTableDescFromID(ctx, p.txn, dependedOn.ID) + dependentDesc, err := sqlbase.GetTableDescFromID(ctx, p.txn, p.ExecCfg().Codec, dependedOn.ID) if err != nil { return err } diff --git a/pkg/sql/rename_index.go b/pkg/sql/rename_index.go index 867c2fc8e2a1..bb564091b633 100644 --- a/pkg/sql/rename_index.go +++ b/pkg/sql/rename_index.go @@ -96,7 +96,7 @@ func (n *renameIndexNode) startExec(params runParams) error { return err } - if err := tableDesc.Validate(ctx, p.txn); err != nil { + if err := tableDesc.Validate(ctx, p.txn, p.ExecCfg().Codec); err != nil { return err } diff --git a/pkg/sql/rename_table.go b/pkg/sql/rename_table.go index 4cb3b7bd0ae9..97e1056296d5 100644 --- a/pkg/sql/rename_table.go +++ b/pkg/sql/rename_table.go @@ -115,9 +115,9 @@ func (n *renameTableNode) startExec(params runParams) error { tableDesc.ParentID = targetDbDesc.ID newTbKey := sqlbase.MakePublicTableNameKey(ctx, params.ExecCfg().Settings, - targetDbDesc.ID, newTn.Table()).Key() + targetDbDesc.ID, newTn.Table()).Key(p.ExecCfg().Codec) - if err := tableDesc.Validate(ctx, p.txn); err != nil { + if err := tableDesc.Validate(ctx, p.txn, p.ExecCfg().Codec); err != nil { return err } @@ -143,13 +143,13 @@ func (n *renameTableNode) startExec(params runParams) error { log.VEventf(ctx, 2, "CPut %s -> %d", newTbKey, descID) } err = writeDescToBatch(ctx, p.extendedEvalCtx.Tracing.KVTracingEnabled(), - p.EvalContext().Settings, b, descID, tableDesc.TableDesc()) + p.EvalContext().Settings, b, p.ExecCfg().Codec, descID, tableDesc.TableDesc()) if err != nil { return err } exists, _, err := sqlbase.LookupPublicTableID( - params.ctx, params.p.txn, targetDbDesc.ID, newTn.Table(), + params.ctx, params.p.txn, p.ExecCfg().Codec, targetDbDesc.ID, newTn.Table(), ) if err == nil && exists { return sqlbase.NewRelationAlreadyExistsError(newTn.Table()) @@ -170,7 +170,7 @@ func (n *renameTableNode) Close(context.Context) {} func (p *planner) dependentViewRenameError( ctx context.Context, typeName, objName string, parentID, viewID sqlbase.ID, ) error { - viewDesc, err := sqlbase.GetTableDescFromID(ctx, p.txn, viewID) + viewDesc, err := sqlbase.GetTableDescFromID(ctx, p.txn, p.ExecCfg().Codec, viewID) if err != nil { return err } diff --git a/pkg/sql/rename_test.go b/pkg/sql/rename_test.go index 252198b971c3..68c4895dacdb 100644 --- a/pkg/sql/rename_test.go +++ b/pkg/sql/rename_test.go @@ -48,7 +48,7 @@ func TestRenameTable(t *testing.T) { // Check the table descriptor. 
desc := &sqlbase.Descriptor{} - tableDescKey := sqlbase.MakeDescMetadataKey(sqlbase.ID(counter)) + tableDescKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(counter)) ts, err := kvDB.GetProtoTs(context.TODO(), tableDescKey, desc) if err != nil { t.Fatal(err) @@ -96,7 +96,7 @@ func isRenamed( expectedVersion sqlbase.DescriptorVersion, cfg *config.SystemConfig, ) bool { - descKey := sqlbase.MakeDescMetadataKey(tableID) + descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableID) val := cfg.GetValue(descKey) if val == nil { return false @@ -165,7 +165,7 @@ CREATE TABLE test.t (a INT PRIMARY KEY); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") mu.Lock() waitTableID = tableDesc.ID mu.Unlock() @@ -398,7 +398,7 @@ CREATE TABLE test.t (a INT PRIMARY KEY); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") // The expected version will be the result of two increments for the two // schema changes and one increment for signaling of the completion of the // drain. See the above comment for an explanation of why there's only one @@ -425,7 +425,7 @@ CREATE TABLE test.t (a INT PRIMARY KEY); wg.Wait() // Table rename to t3 was successful. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "test", "t3") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t3") if version := tableDesc.Version; expectedVersion != version { t.Fatalf("version mismatch: expected = %d, current = %d", expectedVersion, version) } diff --git a/pkg/sql/resolver.go b/pkg/sql/resolver.go index a020ea700eac..ca72f9dbbef4 100644 --- a/pkg/sql/resolver.go +++ b/pkg/sql/resolver.go @@ -14,6 +14,7 @@ import ( "context" "fmt" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -58,7 +59,9 @@ func (p *planner) ResolveUncachedDatabaseByName( ctx context.Context, dbName string, required bool, ) (res *UncachedDatabaseDescriptor, err error) { p.runWithOptions(resolveFlags{skipCache: true}, func() { - res, err = p.LogicalSchemaAccessor().GetDatabaseDesc(ctx, p.txn, dbName, p.CommonLookupFlags(required)) + res, err = p.LogicalSchemaAccessor().GetDatabaseDesc( + ctx, p.txn, p.ExecCfg().Codec, dbName, p.CommonLookupFlags(required), + ) }) return res, err } @@ -70,11 +73,12 @@ func GetObjectNames( ctx context.Context, txn *kv.Txn, sc SchemaResolver, + codec keys.SQLCodec, dbDesc *DatabaseDescriptor, scName string, explicitPrefix bool, ) (res TableNames, err error) { - return sc.LogicalSchemaAccessor().GetObjectNames(ctx, txn, dbDesc, scName, + return sc.LogicalSchemaAccessor().GetObjectNames(ctx, txn, codec, dbDesc, scName, tree.DatabaseListFlags{ CommonLookupFlags: sc.CommonLookupFlags(true /* required */), ExplicitPrefix: explicitPrefix, @@ -289,11 +293,11 @@ func (p *planner) LookupSchema( ctx context.Context, dbName, scName string, ) (found bool, scMeta tree.SchemaMeta, err error) { sc := p.LogicalSchemaAccessor() - dbDesc, err := sc.GetDatabaseDesc(ctx, p.txn, dbName, p.CommonLookupFlags(false /*required*/)) + dbDesc, err := sc.GetDatabaseDesc(ctx, p.txn, p.ExecCfg().Codec, dbName, p.CommonLookupFlags(false /*required*/)) if err != nil || dbDesc == nil { return false, nil, err } - found, _, err = sc.IsValidSchema(ctx, p.txn, dbDesc.ID, 
scName) + found, _, err = sc.IsValidSchema(ctx, p.txn, p.ExecCfg().Codec, dbDesc.ID, scName) if err != nil { return false, nil, err } @@ -307,7 +311,7 @@ func (p *planner) LookupObject( sc := p.LogicalSchemaAccessor() p.tableName = tree.MakeTableNameWithSchema(tree.Name(dbName), tree.Name(scName), tree.Name(tbName)) lookupFlags.CommonLookupFlags = p.CommonLookupFlags(false /* required */) - objDesc, err := sc.GetObjectDesc(ctx, p.txn, p.ExecCfg().Settings, &p.tableName, lookupFlags) + objDesc, err := sc.GetObjectDesc(ctx, p.txn, p.ExecCfg().Settings, p.ExecCfg().Codec, &p.tableName, lookupFlags) return objDesc != nil, objDesc, err } @@ -380,12 +384,12 @@ func getDescriptorsFromTargetList( func (p *planner) getQualifiedTableName( ctx context.Context, desc *sqlbase.TableDescriptor, ) (string, error) { - dbDesc, err := sqlbase.GetDatabaseDescFromID(ctx, p.txn, desc.ParentID) + dbDesc, err := sqlbase.GetDatabaseDescFromID(ctx, p.txn, p.ExecCfg().Codec, desc.ParentID) if err != nil { return "", err } schemaID := desc.GetParentSchemaID() - schemaName, err := schema.ResolveNameByID(ctx, p.txn, desc.ParentID, schemaID) + schemaName, err := schema.ResolveNameByID(ctx, p.txn, p.ExecCfg().Codec, desc.ParentID, schemaID) if err != nil { return "", err } @@ -409,17 +413,18 @@ func findTableContainingIndex( ctx context.Context, txn *kv.Txn, sc SchemaResolver, + codec keys.SQLCodec, dbName, scName string, idxName tree.UnrestrictedName, lookupFlags tree.CommonLookupFlags, ) (result *tree.TableName, desc *MutableTableDescriptor, err error) { sa := sc.LogicalSchemaAccessor() - dbDesc, err := sa.GetDatabaseDesc(ctx, txn, dbName, lookupFlags) + dbDesc, err := sa.GetDatabaseDesc(ctx, txn, codec, dbName, lookupFlags) if dbDesc == nil || err != nil { return nil, nil, err } - tns, err := sa.GetObjectNames(ctx, txn, dbDesc, scName, + tns, err := sa.GetObjectNames(ctx, txn, codec, dbDesc, scName, tree.DatabaseListFlags{CommonLookupFlags: lookupFlags, ExplicitPrefix: true}) if err != nil { return nil, nil, err @@ -470,7 +475,7 @@ func expandMutableIndexName( ctx context.Context, p *planner, index *tree.TableIndexName, requireTable bool, ) (tn *tree.TableName, desc *MutableTableDescriptor, err error) { p.runWithOptions(resolveFlags{skipCache: true}, func() { - tn, desc, err = expandIndexName(ctx, p.txn, p, index, requireTable) + tn, desc, err = expandIndexName(ctx, p.txn, p, p.ExecCfg().Codec, index, requireTable) }) return tn, desc, err } @@ -479,6 +484,7 @@ func expandIndexName( ctx context.Context, txn *kv.Txn, sc SchemaResolver, + codec keys.SQLCodec, index *tree.TableIndexName, requireTable bool, ) (tn *tree.TableName, desc *MutableTableDescriptor, err error) { @@ -514,7 +520,7 @@ func expandIndexName( lookupFlags := sc.CommonLookupFlags(requireTable) var foundTn *tree.TableName - foundTn, desc, err = findTableContainingIndex(ctx, txn, sc, tn.Catalog(), tn.Schema(), index.Index, lookupFlags) + foundTn, desc, err = findTableContainingIndex(ctx, txn, sc, codec, tn.Catalog(), tn.Schema(), index.Index, lookupFlags) if err != nil { return nil, nil, err } diff --git a/pkg/sql/revert_test.go b/pkg/sql/revert_test.go index 59b9f2feabaa..34c711668ef3 100644 --- a/pkg/sql/revert_test.go +++ b/pkg/sql/revert_test.go @@ -16,6 +16,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" @@ -67,7 +68,7 
@@ func TestRevertTable(t *testing.T) { require.Equal(t, before, aost) // Revert the table to ts. - desc := sqlbase.GetTableDescriptor(kv, "test", "test") + desc := sqlbase.GetTableDescriptor(kv, keys.SystemSQLCodec, "test", "test") desc.State = sqlbase.TableDescriptor_OFFLINE // bypass the offline check. require.NoError(t, RevertTables(context.TODO(), kv, &execCfg, []*sqlbase.TableDescriptor{desc}, targetTime, 10)) @@ -93,9 +94,9 @@ func TestRevertTable(t *testing.T) { db.Exec(t, `DELETE FROM child WHERE a % 7 = 0`) // Revert the table to ts. - desc := sqlbase.GetTableDescriptor(kv, "test", "test") + desc := sqlbase.GetTableDescriptor(kv, keys.SystemSQLCodec, "test", "test") desc.State = sqlbase.TableDescriptor_OFFLINE - child := sqlbase.GetTableDescriptor(kv, "test", "child") + child := sqlbase.GetTableDescriptor(kv, keys.SystemSQLCodec, "test", "child") child.State = sqlbase.TableDescriptor_OFFLINE t.Run("reject only parent", func(t *testing.T) { require.Error(t, RevertTables(ctx, kv, &execCfg, []*sqlbase.TableDescriptor{desc}, targetTime, 10)) diff --git a/pkg/sql/row/cascader.go b/pkg/sql/row/cascader.go index 4c79f939e491..4301472138ef 100644 --- a/pkg/sql/row/cascader.go +++ b/pkg/sql/row/cascader.go @@ -954,13 +954,19 @@ func (c *cascader) updateRows( return nil, nil, nil, 0, err } if !column.Nullable { - database, err := sqlbase.GetDatabaseDescFromID(ctx, c.txn, referencingTable.ParentID) + database, err := sqlbase.GetDatabaseDescFromID( + ctx, + c.txn, + c.evalCtx.Codec, + referencingTable.ParentID, + ) if err != nil { return nil, nil, nil, 0, err } schema, err := schema.ResolveNameByID( ctx, c.txn, + c.evalCtx.Codec, referencingTable.ParentID, referencingTable.GetParentSchemaID(), ) diff --git a/pkg/sql/row/fetcher.go b/pkg/sql/row/fetcher.go index 1e0543f2bc70..0442daa4cb11 100644 --- a/pkg/sql/row/fetcher.go +++ b/pkg/sql/row/fetcher.go @@ -161,7 +161,7 @@ type FetcherTableArgs struct { // // Process res.row // } type Fetcher struct { - // codec is used to encode and decode SQL keys. + // codec is used to encode and decode sql keys. codec keys.SQLCodec // tables is a slice of all the tables and their descriptors for which diff --git a/pkg/sql/row/fetcher_mvcc_test.go b/pkg/sql/row/fetcher_mvcc_test.go index 73022b873b6d..68093393bedf 100644 --- a/pkg/sql/row/fetcher_mvcc_test.go +++ b/pkg/sql/row/fetcher_mvcc_test.go @@ -83,8 +83,8 @@ func TestRowFetcherMVCCMetadata(t *testing.T) { e STRING, f STRING, PRIMARY KEY (e, f) ) INTERLEAVE IN PARENT parent (e)`) - parentDesc := sqlbase.GetImmutableTableDescriptor(kvDB, `d`, `parent`) - childDesc := sqlbase.GetImmutableTableDescriptor(kvDB, `d`, `child`) + parentDesc := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, `d`, `parent`) + childDesc := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, `d`, `child`) var args []row.FetcherTableArgs for _, desc := range []*sqlbase.ImmutableTableDescriptor{parentDesc, childDesc} { colIdxMap := make(map[sqlbase.ColumnID]int) diff --git a/pkg/sql/row/fetcher_test.go b/pkg/sql/row/fetcher_test.go index d78f7a0b2aa2..a3b91b1f8ac1 100644 --- a/pkg/sql/row/fetcher_test.go +++ b/pkg/sql/row/fetcher_test.go @@ -148,7 +148,7 @@ func TestNextRowSingle(t *testing.T) { // We try to read rows from each table. 
for tableName, table := range tables { t.Run(tableName, func(t *testing.T) { - tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, sqlutils.TestDB, tableName) + tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, tableName) var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, table.nCols-1) @@ -268,7 +268,7 @@ func TestNextRowBatchLimiting(t *testing.T) { // We try to read rows from each table. for tableName, table := range tables { t.Run(tableName, func(t *testing.T) { - tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, sqlutils.TestDB, tableName) + tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, tableName) var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, table.nCols-1) @@ -380,7 +380,7 @@ INDEX(c) alloc := &sqlbase.DatumAlloc{} - tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, sqlutils.TestDB, tableName) + tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, tableName) var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, table.nCols-1) @@ -559,7 +559,7 @@ func TestNextRowSecondaryIndex(t *testing.T) { // We try to read rows from each index. for tableName, table := range tables { t.Run(tableName, func(t *testing.T) { - tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, sqlutils.TestDB, tableName) + tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, tableName) var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, table.nVals-1) @@ -912,7 +912,7 @@ func TestNextRowInterleaved(t *testing.T) { // RowFetcher. idLookups := make(map[uint64]*fetcherEntryArgs, len(entries)) for i, entry := range entries { - tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, sqlutils.TestDB, entry.tableName) + tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, entry.tableName) var indexID sqlbase.IndexID if entry.indexIdx == 0 { indexID = tableDesc.PrimaryIndex.ID @@ -1031,7 +1031,7 @@ func TestRowFetcherReset(t *testing.T) { 0, sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(1)), ) - tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, sqlutils.TestDB, "foo") + tableDesc := sqlbase.GetImmutableTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "foo") var valNeededForCol util.FastIntSet valNeededForCol.AddRange(0, 1) args := []initFetcherArgs{ diff --git a/pkg/sql/rowexec/backfiller.go b/pkg/sql/rowexec/backfiller.go index b8e28bb62235..a45330d6bf47 100644 --- a/pkg/sql/rowexec/backfiller.go +++ b/pkg/sql/rowexec/backfiller.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/backfill" @@ -140,8 +141,10 @@ func (b *backfiller) doRun(ctx context.Context) *execinfrapb.ProducerMetadata { if !st.Version.IsActive(ctx, clusterversion.VersionAtomicChangeReplicasTrigger) { // There is a node of older version which could be the coordinator. // So we communicate the finished work by writing to the jobs row. 
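(Illustrative aside, not part of the patch.) The backfiller hunks below thread the codec from the flow context into the resume-span helpers, since GetResumeSpans loads the table descriptor and therefore needs a tenant-aware descriptor key. A caller-side sketch mirroring the test usage below; checkResumeProgress is a hypothetical helper:

    func checkResumeProgress(
        ctx context.Context,
        db *kv.DB,
        registry *jobs.Registry,
        codec keys.SQLCodec,
        tableID sqlbase.ID,
        mutationID sqlbase.MutationID,
    ) ([]roachpb.Span, error) {
        var spans []roachpb.Span
        err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
            var err error
            // GetResumeSpans reads the table descriptor, so the codec decides
            // which keyspace that read is issued against.
            spans, _, _, err = rowexec.GetResumeSpans(
                ctx, registry, txn, codec, tableID, mutationID, backfill.IndexMutationFilter,
            )
            return err
        })
        return spans, err
    }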
- err = WriteResumeSpan(ctx, + err = WriteResumeSpan( + ctx, b.flowCtx.Cfg.DB, + b.flowCtx.Codec(), b.spec.Table.ID, b.spec.Table.Mutations[0].MutationID, b.filter, @@ -237,11 +240,12 @@ func GetResumeSpans( ctx context.Context, jobsRegistry *jobs.Registry, txn *kv.Txn, + codec keys.SQLCodec, tableID sqlbase.ID, mutationID sqlbase.MutationID, filter backfill.MutationFilter, ) ([]roachpb.Span, *jobs.Job, int, error) { - tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, tableID) + tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, codec, tableID) if err != nil { return nil, nil, 0, err } @@ -317,6 +321,7 @@ func SetResumeSpansInJob( func WriteResumeSpan( ctx context.Context, db *kv.DB, + codec keys.SQLCodec, id sqlbase.ID, mutationID sqlbase.MutationID, filter backfill.MutationFilter, @@ -327,7 +332,9 @@ func WriteResumeSpan( defer tracing.FinishSpan(traceSpan) return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - resumeSpans, job, mutationIdx, error := GetResumeSpans(ctx, jobsRegistry, txn, id, mutationID, filter) + resumeSpans, job, mutationIdx, error := GetResumeSpans( + ctx, jobsRegistry, txn, codec, id, mutationID, filter, + ) if error != nil { return error } diff --git a/pkg/sql/rowexec/backfiller_test.go b/pkg/sql/rowexec/backfiller_test.go index 5c39c3a6e6db..7c194d915974 100644 --- a/pkg/sql/rowexec/backfiller_test.go +++ b/pkg/sql/rowexec/backfiller_test.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" @@ -71,11 +72,11 @@ func TestWriteResumeSpan(t *testing.T) { } registry := server.JobRegistry().(*jobs.Registry) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if err := kvDB.Put( ctx, - sqlbase.MakeDescMetadataKey(tableDesc.ID), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), sqlbase.WrapDescriptor(tableDesc), ); err != nil { t.Fatal(err) @@ -142,7 +143,7 @@ func TestWriteResumeSpan(t *testing.T) { finished.EndKey = test.resume.Key } if err := rowexec.WriteResumeSpan( - ctx, kvDB, tableDesc.ID, mutationID, backfill.IndexMutationFilter, roachpb.Spans{finished}, registry, + ctx, kvDB, keys.SystemSQLCodec, tableDesc.ID, mutationID, backfill.IndexMutationFilter, roachpb.Spans{finished}, registry, ); err != nil { t.Error(err) } @@ -174,7 +175,7 @@ func TestWriteResumeSpan(t *testing.T) { if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error got, _, _, err = rowexec.GetResumeSpans( - ctx, registry, txn, tableDesc.ID, mutationID, backfill.IndexMutationFilter) + ctx, registry, txn, keys.SystemSQLCodec, tableDesc.ID, mutationID, backfill.IndexMutationFilter) return err }); err != nil { t.Error(err) diff --git a/pkg/sql/rowexec/index_skip_table_reader_test.go b/pkg/sql/rowexec/index_skip_table_reader_test.go index 54731bf55cd7..31820123ea99 100644 --- a/pkg/sql/rowexec/index_skip_table_reader_test.go +++ b/pkg/sql/rowexec/index_skip_table_reader_test.go @@ -159,13 +159,13 @@ func TestIndexSkipTableReader(t *testing.T) { 10, sqlutils.ToRowFn(xFnt7, nullt7, nullt7)) - td1 := sqlbase.GetTableDescriptor(kvDB, "test", "t1") - td2 := sqlbase.GetTableDescriptor(kvDB, "test", "t2") - td3 := sqlbase.GetTableDescriptor(kvDB, "test", "t3") - td4 
:= sqlbase.GetTableDescriptor(kvDB, "test", "t4") - td5 := sqlbase.GetTableDescriptor(kvDB, "test", "t5") - td6 := sqlbase.GetTableDescriptor(kvDB, "test", "t6") - td7 := sqlbase.GetTableDescriptor(kvDB, "test", "t7") + td1 := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t1") + td2 := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t2") + td3 := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t3") + td4 := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t4") + td5 := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t5") + td6 := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t6") + td7 := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t7") makeIndexSpan := func(td *sqlbase.TableDescriptor, start, end int) execinfrapb.TableReaderSpan { var span roachpb.Span @@ -490,7 +490,7 @@ ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[ } kvDB := tc.Server(0).DB() - td := sqlbase.GetTableDescriptor(kvDB, "test", "t") + td := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") st := tc.Server(0).ClusterSettings() evalCtx := tree.MakeTestingEvalContext(st) @@ -600,7 +600,7 @@ func BenchmarkIndexScanTableReader(b *testing.B) { expectedCount++ } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", tableName) runner := func(reader execinfra.RowSource, b *testing.B) { reader.Start(ctx) diff --git a/pkg/sql/rowexec/indexjoiner_test.go b/pkg/sql/rowexec/indexjoiner_test.go index b9d31b82930c..3cab0c8e7517 100644 --- a/pkg/sql/rowexec/indexjoiner_test.go +++ b/pkg/sql/rowexec/indexjoiner_test.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -61,8 +62,8 @@ func TestIndexJoiner(t *testing.T) { 99, sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn)) - td := sqlbase.GetTableDescriptor(kvDB, "test", "t") - tdf := sqlbase.GetTableDescriptor(kvDB, "test", "t2") + td := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") + tdf := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t2") v := [10]sqlbase.EncDatum{} for i := range v { diff --git a/pkg/sql/rowexec/interleaved_reader_joiner_test.go b/pkg/sql/rowexec/interleaved_reader_joiner_test.go index 45865185d2f8..4f1d5fc211b3 100644 --- a/pkg/sql/rowexec/interleaved_reader_joiner_test.go +++ b/pkg/sql/rowexec/interleaved_reader_joiner_test.go @@ -118,10 +118,10 @@ func TestInterleavedReaderJoiner(t *testing.T) { (32, 32, '32') `, sqlutils.TestDB)) - pd := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, "parent") - cd1 := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, "child1") - cd2 := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, "child2") - cd3 := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, "child3") + pd := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "parent") + cd1 := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "child1") + cd2 := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "child2") + cd3 := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "child3") // InterleavedReaderJoiner specs for each parent-child combination used // throughout the test 
cases. @@ -462,9 +462,9 @@ func TestInterleavedReaderJoinerErrors(t *testing.T) { sqlutils.ToRowFn(sqlutils.RowModuloShiftedFn(0, 0), sqlutils.RowModuloShiftedFn(0), sqlutils.RowIdxFn), ) - pd := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, "parent") - cd := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, "child") - gcd := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, "grandchild") + pd := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "parent") + cd := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "child") + gcd := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "grandchild") testCases := []struct { spec execinfrapb.InterleavedReaderJoinerSpec @@ -572,8 +572,8 @@ func TestInterleavedReaderJoinerTrailingMetadata(t *testing.T) { func(row int) []tree.Datum { return nil }, ) - pd := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, "parent") - cd := sqlbase.GetTableDescriptor(kvDB, sqlutils.TestDB, "child") + pd := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "parent") + cd := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "child") evalCtx := tree.MakeTestingEvalContext(s.ClusterSettings()) defer evalCtx.Stop(ctx) diff --git a/pkg/sql/rowexec/joinreader_test.go b/pkg/sql/rowexec/joinreader_test.go index 146e1624e137..93f584cf1962 100644 --- a/pkg/sql/rowexec/joinreader_test.go +++ b/pkg/sql/rowexec/joinreader_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -74,14 +75,14 @@ func TestJoinReader(t *testing.T) { t.Fatal(err) } - tdSecondary := sqlbase.GetTableDescriptor(kvDB, "test", "t") + tdSecondary := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") sqlutils.CreateTable(t, sqlDB, "t2", "a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), FAMILY f1 (a, b), FAMILY f2 (s), FAMILY f3 (sum), INDEX bs (b,s)", 99, sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn)) - tdFamily := sqlbase.GetTableDescriptor(kvDB, "test", "t2") + tdFamily := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t2") sqlutils.CreateTable(t, sqlDB, "t3parent", "a INT PRIMARY KEY", @@ -93,7 +94,7 @@ func TestJoinReader(t *testing.T) { "t3parent(a)", 99, sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn)) - tdInterleaved := sqlbase.GetTableDescriptor(kvDB, "test", "t3") + tdInterleaved := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t3") testCases := []struct { description string @@ -497,7 +498,7 @@ CREATE TABLE test.t (a INT, s STRING, INDEX (a, s))`); err != nil { key, stringColVal, numRows); err != nil { t.Fatal(err) } - td := sqlbase.GetTableDescriptor(kvDB, "test", "t") + td := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") st := cluster.MakeTestingClusterSettings() tempEngine, _, err := storage.NewTempEngine(ctx, storage.DefaultStorageEngine, base.DefaultTestTempStorageConfig(st), base.DefaultTestStoreSpec) @@ -592,7 +593,7 @@ func TestJoinReaderDrain(t *testing.T) { 1, /* numRows */ sqlutils.ToRowFn(sqlutils.RowIdxFn), ) - td := sqlbase.GetTableDescriptor(kvDB, "test", "t") + td := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") st := s.ClusterSettings() tempEngine, _, err := storage.NewTempEngine(context.Background(), storage.DefaultStorageEngine, 
base.DefaultTestTempStorageConfig(st), base.DefaultTestStoreSpec) @@ -826,7 +827,7 @@ func BenchmarkJoinReader(b *testing.B) { // Get the table descriptor and find the index that will provide us with // the expected match ratio. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", tableName) indexIdx := uint32(0) for i := range tableDesc.Indexes { require.Equal(b, 1, len(tableDesc.Indexes[i].ColumnNames), "all indexes created in this benchmark should only contain one column") diff --git a/pkg/sql/rowexec/tablereader_test.go b/pkg/sql/rowexec/tablereader_test.go index 25e5147740bf..d6360128de18 100644 --- a/pkg/sql/rowexec/tablereader_test.go +++ b/pkg/sql/rowexec/tablereader_test.go @@ -65,7 +65,7 @@ func TestTableReader(t *testing.T) { 99, sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn)) - td := sqlbase.GetTableDescriptor(kvDB, "test", "t") + td := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") makeIndexSpan := func(start, end int) execinfrapb.TableReaderSpan { var span roachpb.Span @@ -207,7 +207,7 @@ ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[ } kvDB := tc.Server(0).DB() - td := sqlbase.GetTableDescriptor(kvDB, "test", "t") + td := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") st := tc.Server(0).ClusterSettings() evalCtx := tree.MakeTestingEvalContext(st) @@ -314,7 +314,7 @@ func TestLimitScans(t *testing.T) { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") evalCtx := tree.MakeTestingEvalContext(s.ClusterSettings()) defer evalCtx.Stop(ctx) @@ -421,7 +421,7 @@ func BenchmarkTableReader(b *testing.B) { numRows, sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(42)), ) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", tableName) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", tableName) flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, diff --git a/pkg/sql/rowexec/zigzagjoiner_test.go b/pkg/sql/rowexec/zigzagjoiner_test.go index 3c74d4603066..883ea02f2c58 100644 --- a/pkg/sql/rowexec/zigzagjoiner_test.go +++ b/pkg/sql/rowexec/zigzagjoiner_test.go @@ -15,6 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -183,17 +184,17 @@ func TestZigzagJoiner(t *testing.T) { true, /* shouldPrint */ ) - empty := sqlbase.GetTableDescriptor(kvDB, "test", "empty") - single := sqlbase.GetTableDescriptor(kvDB, "test", "single") - smallDesc := sqlbase.GetTableDescriptor(kvDB, "test", "small") - medDesc := sqlbase.GetTableDescriptor(kvDB, "test", "med") - highRangeDesc := sqlbase.GetTableDescriptor(kvDB, "test", "offset") - overlappingDesc := sqlbase.GetTableDescriptor(kvDB, "test", "overlapping") - compDesc := sqlbase.GetTableDescriptor(kvDB, "test", "comp") - revCompDesc := sqlbase.GetTableDescriptor(kvDB, "test", "rev") - compUnqDesc := sqlbase.GetTableDescriptor(kvDB, "test", "unq") - t2Desc := sqlbase.GetTableDescriptor(kvDB, "test", "t2") - nullableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "nullable") + empty := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "empty") + single := 
sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "single") + smallDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "small") + medDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "med") + highRangeDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "offset") + overlappingDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "overlapping") + compDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "comp") + revCompDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "rev") + compUnqDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "unq") + t2Desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t2") + nullableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "nullable") testCases := []zigzagJoinerTestCase{ { @@ -565,7 +566,7 @@ func TestZigzagJoinerDrain(t *testing.T) { 1, /* numRows */ sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowIdxFn, sqlutils.RowIdxFn, sqlutils.RowIdxFn), ) - td := sqlbase.GetTableDescriptor(kvDB, "test", "t") + td := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") evalCtx := tree.MakeTestingEvalContext(s.ClusterSettings()) defer evalCtx.Stop(ctx) diff --git a/pkg/sql/scatter_test.go b/pkg/sql/scatter_test.go index 9b0feeeb87b4..aad2dda5a9f7 100644 --- a/pkg/sql/scatter_test.go +++ b/pkg/sql/scatter_test.go @@ -120,7 +120,7 @@ func TestScatterResponse(t *testing.T) { 1000, sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(10)), ) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") r := sqlutils.MakeSQLRunner(sqlDB) r.Exec(t, "ALTER TABLE test.t SPLIT AT (SELECT i*10 FROM generate_series(1, 99) AS g(i))") diff --git a/pkg/sql/schema/schema.go b/pkg/sql/schema/schema.go index 924460f29f5f..51b8f7ea2eee 100644 --- a/pkg/sql/schema/schema.go +++ b/pkg/sql/schema/schema.go @@ -36,7 +36,7 @@ var staticSchemaIDMap = map[sqlbase.ID]string{ // Instead, we have to rely on a scan of the kv table. // TODO(sqlexec): this should probably be cached. func ResolveNameByID( - ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, schemaID sqlbase.ID, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbID sqlbase.ID, schemaID sqlbase.ID, ) (string, error) { // Fast-path for public schema and virtual schemas, to avoid hot lookups. for id, schemaName := range staticSchemaIDMap { @@ -44,7 +44,7 @@ func ResolveNameByID( return schemaName, nil } } - schemas, err := GetForDatabase(ctx, txn, dbID) + schemas, err := GetForDatabase(ctx, txn, codec, dbID) if err != nil { return "", err } @@ -57,11 +57,11 @@ func ResolveNameByID( // GetForDatabase looks up and returns all available // schema ids to names for a given database. 
 func GetForDatabase(
-	ctx context.Context, txn *kv.Txn, dbID sqlbase.ID,
+	ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbID sqlbase.ID,
 ) (map[sqlbase.ID]string, error) {
 	log.Eventf(ctx, "fetching all schema descriptor IDs for %d", dbID)
-	nameKey := sqlbase.NewSchemaKey(dbID, "" /* name */).Key()
+	nameKey := sqlbase.NewSchemaKey(dbID, "" /* name */).Key(codec)
 	kvs, err := txn.Scan(ctx, nameKey, nameKey.PrefixEnd(), 0 /* maxRows */)
 	if err != nil {
 		return nil, err
 	}
@@ -78,7 +78,7 @@ func GetForDatabase(
 		if _, ok := ret[id]; ok {
 			continue
 		}
-		_, _, name, err := sqlbase.DecodeNameMetadataKey(kv.Key)
+		_, _, name, err := sqlbase.DecodeNameMetadataKey(codec, kv.Key)
 		if err != nil {
 			return nil, err
 		}
diff --git a/pkg/sql/schema_accessors.go b/pkg/sql/schema_accessors.go
index b68a81fd2543..56dfaf2f87b3 100644
--- a/pkg/sql/schema_accessors.go
+++ b/pkg/sql/schema_accessors.go
@@ -13,6 +13,7 @@ package sql
 import (
 	"context"
 
+	"github.com/cockroachdb/cockroach/pkg/keys"
 	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
 	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
@@ -75,20 +76,20 @@ type SchemaAccessor interface {
 	// GetDatabaseDesc looks up a database by name and returns its
 	// descriptor. If the database is not found and required is true,
 	// an error is returned; otherwise a nil reference is returned.
-	GetDatabaseDesc(ctx context.Context, txn *kv.Txn, dbName string, flags tree.DatabaseLookupFlags) (*DatabaseDescriptor, error)
+	GetDatabaseDesc(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbName string, flags tree.DatabaseLookupFlags) (*DatabaseDescriptor, error)
 
 	// IsValidSchema returns true and the SchemaID if the given schema name is valid for the given database.
-	IsValidSchema(ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, scName string) (bool, sqlbase.ID, error)
+	IsValidSchema(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbID sqlbase.ID, scName string) (bool, sqlbase.ID, error)
 
 	// GetObjectNames returns the list of all objects in the given
 	// database and schema.
 	// TODO(solon): when separate schemas are supported, this
 	// API should be extended to use schema descriptors.
-	GetObjectNames(ctx context.Context, txn *kv.Txn, db *DatabaseDescriptor, scName string, flags tree.DatabaseListFlags) (TableNames, error)
+	GetObjectNames(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, db *DatabaseDescriptor, scName string, flags tree.DatabaseListFlags) (TableNames, error)
 
 	// GetObjectDesc looks up an object by name and returns both its
 	// descriptor and that of its parent database. If the object is not
 	// found and flags.required is true, an error is returned, otherwise
 	// a nil reference is returned.
- GetObjectDesc(ctx context.Context, txn *kv.Txn, settings *cluster.Settings, name *ObjectName, flags tree.ObjectLookupFlags) (ObjectDescriptor, error) + GetObjectDesc(ctx context.Context, txn *kv.Txn, settings *cluster.Settings, codec keys.SQLCodec, name *ObjectName, flags tree.ObjectLookupFlags) (ObjectDescriptor, error) } diff --git a/pkg/sql/schema_change_migrations_test.go b/pkg/sql/schema_change_migrations_test.go index 05500a3cfd74..5345aa5b9c69 100644 --- a/pkg/sql/schema_change_migrations_test.go +++ b/pkg/sql/schema_change_migrations_test.go @@ -22,6 +22,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -298,9 +299,9 @@ func migrateJobToOldFormat( ) error { ctx := context.Background() - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if schemaChangeType == CreateTable { - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "new_table") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "new_table") } if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { @@ -358,7 +359,9 @@ func migrateJobToOldFormat( if err := txn.SetSystemConfigTrigger(); err != nil { return err } - return kvDB.Put(ctx, sqlbase.MakeDescMetadataKey(tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc)) + return kvDB.Put(ctx, sqlbase.MakeDescMetadataKey( + keys.SystemSQLCodec, tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc), + ) }) } @@ -422,7 +425,7 @@ func migrateGCJobToOldFormat( return nil case DropIndex: - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if l := len(tableDesc.GCMutations); l != 1 { return errors.AssertionFailedf("expected exactly 1 GCMutation, found %d", l) } @@ -441,7 +444,9 @@ func migrateGCJobToOldFormat( if err := txn.SetSystemConfigTrigger(); err != nil { return err } - return kvDB.Put(ctx, sqlbase.MakeDescMetadataKey(tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc)) + return kvDB.Put(ctx, sqlbase.MakeDescMetadataKey( + keys.SystemSQLCodec, tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc), + ) }) default: return errors.Errorf("invalid schema change type: %d", schemaChangeType) @@ -868,7 +873,7 @@ func TestGCJobCreated(t *testing.T) { if _, err := sqlDB.Exec(`CREATE DATABASE t; CREATE TABLE t.test();`); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") tableDesc.State = sqlbase.TableDescriptor_DROP tableDesc.Version++ tableDesc.DropTime = 1 @@ -876,10 +881,14 @@ func TestGCJobCreated(t *testing.T) { if err := txn.SetSystemConfigTrigger(); err != nil { return err } - if err := sqlbase.RemoveObjectNamespaceEntry(ctx, txn, tableDesc.ID, tableDesc.ParentID, tableDesc.Name, false /* kvTrace */); err != nil { + if err := sqlbase.RemoveObjectNamespaceEntry( + ctx, txn, keys.SystemSQLCodec, tableDesc.ID, tableDesc.ParentID, tableDesc.Name, false, /* kvTrace */ + ); err != nil { return err } - return kvDB.Put(ctx, sqlbase.MakeDescMetadataKey(tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc)) + return kvDB.Put(ctx, sqlbase.MakeDescMetadataKey( + 
keys.SystemSQLCodec, tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc), + ) }); err != nil { t.Fatal(err) } diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index 554318d08a53..fc9c3539a8d3 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -353,8 +353,9 @@ func (sc *SchemaChanger) drainNames(ctx context.Context) error { func(txn *kv.Txn) error { b := txn.NewBatch() for _, drain := range namesToReclaim { - err := sqlbase.RemoveObjectNamespaceEntry(ctx, txn, drain.ParentID, drain.ParentSchemaID, - drain.Name, false /* KVTrace */) + err := sqlbase.RemoveObjectNamespaceEntry( + ctx, txn, sc.execCfg.Codec, drain.ParentID, drain.ParentSchemaID, drain.Name, false, /* KVTrace */ + ) if err != nil { return err } @@ -573,7 +574,7 @@ func (sc *SchemaChanger) handlePermanentSchemaChangeError( // initialize the job running status. func (sc *SchemaChanger) initJobRunningStatus(ctx context.Context) error { return sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - desc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) + desc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.execCfg.Codec, sc.tableID) if err != nil { return err } @@ -726,7 +727,7 @@ func (sc *SchemaChanger) done(ctx context.Context) (*sqlbase.ImmutableTableDescr fksByBackrefTable = make(map[sqlbase.ID][]*sqlbase.ConstraintToUpdate) interleaveParents = make(map[sqlbase.ID]struct{}) - desc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) + desc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.execCfg.Codec, sc.tableID) if err != nil { return err } @@ -993,7 +994,7 @@ func (sc *SchemaChanger) notFirstInLine( err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { notFirst = false var err error - desc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) + desc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.execCfg.Codec, sc.tableID) if err != nil { return err } @@ -1053,7 +1054,7 @@ func (sc *SchemaChanger) maybeReverseMutations(ctx context.Context, causingError err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { fksByBackrefTable = make(map[sqlbase.ID][]*sqlbase.ConstraintToUpdate) var err error - desc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) + desc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.execCfg.Codec, sc.tableID) if err != nil { return err } @@ -1164,7 +1165,7 @@ func (sc *SchemaChanger) maybeReverseMutations(ctx context.Context, causingError // Read the table descriptor from the store. The Version of the // descriptor has already been incremented in the transaction and // this descriptor can be modified without incrementing the version. - tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) + tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.execCfg.Codec, sc.tableID) if err != nil { return err } diff --git a/pkg/sql/schema_changer_test.go b/pkg/sql/schema_changer_test.go index 5cf77711ec43..4a403c6d301a 100644 --- a/pkg/sql/schema_changer_test.go +++ b/pkg/sql/schema_changer_test.go @@ -98,6 +98,7 @@ func TestSchemaChangeProcess(t *testing.T) { execCfg.Clock, execCfg.InternalExecutor, execCfg.Settings, + execCfg.Codec, sql.LeaseManagerTestingKnobs{}, stopper, cfg, @@ -116,7 +117,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); } // Read table descriptor for version. 
- tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") expectedVersion := tableDesc.Version ctx := context.TODO() @@ -126,7 +127,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") newVersion := tableDesc.Version if newVersion != expectedVersion { t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion) @@ -157,7 +158,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); expectedVersion++ if err := kvDB.Put( ctx, - sqlbase.MakeDescMetadataKey(tableDesc.ID), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), sqlbase.WrapDescriptor(tableDesc), ); err != nil { t.Fatal(err) @@ -173,7 +174,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") newVersion = tableDesc.Version if newVersion != expectedVersion { t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion) @@ -185,7 +186,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); } } // RunStateMachineBeforeBackfill() doesn't complete the schema change. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Mutations) == 0 { t.Fatalf("table expected to have an outstanding schema change: %v", tableDesc) } @@ -214,7 +215,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); } // Read table descriptor for version. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // A long running schema change operation runs through // a state machine that increments the version by 3. @@ -235,7 +236,7 @@ CREATE INDEX foo ON t.test (v) // Wait until index is created. for r := retry.Start(retryOpts); r.Next(); { - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Indexes) == 1 { break } @@ -247,7 +248,7 @@ CREATE INDEX foo ON t.test (v) mTest.CheckQueryResults(t, indexQuery, [][]string{{"b"}, {"d"}}) // Ensure that the version has been incremented. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") newVersion := tableDesc.Version if newVersion != expectedVersion { t.Fatalf("bad version; e = %d, v = %d", expectedVersion, newVersion) @@ -260,7 +261,7 @@ CREATE INDEX foo ON t.test (v) for r := retry.Start(retryOpts); r.Next(); { // Ensure that the version gets incremented. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") name := tableDesc.Indexes[0].Name if name != "ufo" { t.Fatalf("bad index name %s", name) @@ -279,7 +280,7 @@ CREATE INDEX foo ON t.test (v) } // Wait until indexes are created. 
for r := retry.Start(retryOpts); r.Next(); { - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Indexes) == count+1 { break } @@ -295,7 +296,7 @@ CREATE INDEX foo ON t.test (v) } func getTableKeyCount(ctx context.Context, kvDB *kv.DB) (int, error) { - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID)) tableEnd := tablePrefix.PrefixEnd() kvs, err := kvDB.Scan(ctx, tablePrefix, tableEnd, 0) @@ -503,7 +504,7 @@ CREATE UNIQUE INDEX vidx ON t.test (v); } // Split the table into multiple ranges. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") var sps []sql.SplitPoint for i := 1; i <= numNodes-1; i++ { sps = append(sps, sql.SplitPoint{TargetNodeIdx: i, Vals: []interface{}{maxValue / numNodes * i}}) @@ -679,7 +680,7 @@ CREATE UNIQUE INDEX vidx ON t.test (v); } // Split the table into multiple ranges. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") var sps []sql.SplitPoint for i := 1; i <= numNodes-1; i++ { sps = append(sps, sql.SplitPoint{TargetNodeIdx: i, Vals: []interface{}{maxValue / numNodes * i}}) @@ -729,7 +730,7 @@ CREATE UNIQUE INDEX vidx ON t.test (v); } // Check that the table descriptor exists so we know the data will // eventually be deleted. - tbDescKey := sqlbase.MakeDescMetadataKey(tableDesc.ID) + tbDescKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID) if gr, err := kvDB.Get(ctx, tbDescKey); err != nil { t.Fatal(err) } else if !gr.Exists() { @@ -770,7 +771,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Bulk insert. if err := bulkInsertIntoTable(sqlDB, maxValue); err != nil { @@ -914,7 +915,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); `); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Add a zone config for the table. if _, err := addImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { t.Fatal(err) @@ -1144,7 +1145,7 @@ CREATE TABLE t.test ( } // Read table descriptor. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Checks) != 3 { t.Fatalf("Expected 3 checks but got %d ", len(tableDesc.Checks)) } @@ -1154,7 +1155,7 @@ CREATE TABLE t.test ( } // Re-read table descriptor. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Only check_ab should remain if len(tableDesc.Checks) != 1 { checkExprs := make([]string, 0) @@ -1319,7 +1320,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") id := tableDesc.ID ctx := context.TODO() @@ -1455,7 +1456,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); // Allow async schema change purge to attempt backfill and error. 
atomic.StoreUint32(&enableAsyncSchemaChanges, 1) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // deal with schema change knob if _, err := addImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { t.Fatal(err) @@ -1470,7 +1471,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); return nil }) - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // There is still a DROP INDEX mutation waiting for GC. if e := 1; len(tableDesc.GCMutations) != e { @@ -1498,7 +1499,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); atomic.StoreUint32(&enableAsyncSchemaChanges, 1) testutils.SucceedsSoon(t, func() error { - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.GCMutations) > 0 { return errors.Errorf("%d GC mutations remaining", len(tableDesc.GCMutations)) } @@ -1601,7 +1602,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); } // Check that constraints are cleaned up. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if checks := tableDesc.AllActiveAndInactiveChecks(); len(checks) > 0 { t.Fatalf("found checks %+v", checks) } @@ -1656,7 +1657,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") testCases := []struct { sql string @@ -1721,7 +1722,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); expectedCols := []string{"k", "b", "d"} // Wait until all the mutations have been processed. testutils.SucceedsSoon(t, func() error { - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Mutations) > 0 { return errors.Errorf("%d mutations remaining", len(tableDesc.Mutations)) } @@ -1827,7 +1828,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); } testutils.SucceedsSoon(t, func() error { - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.GCMutations) > 0 { return errors.Errorf("%d gc mutations remaining", len(tableDesc.GCMutations)) } @@ -1902,7 +1903,7 @@ CREATE TABLE t.test ( `); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if tableDesc.Families[0].DefaultColumnID != 0 { t.Fatalf("default column id not set properly: %s", tableDesc) } @@ -1944,7 +1945,7 @@ CREATE TABLE t.test ( if _, err := sqlDB.Exec(`ALTER TABLE t.test ADD COLUMN v INT FAMILY F1`); err != nil { t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if tableDesc.Families[0].DefaultColumnID != 2 { t.Fatalf("default column id not set properly: %s", tableDesc) } @@ -2129,7 +2130,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT UNIQUE DEFAULT 23 CREATE FAMILY F3 } // The index is not regenerated. 
- tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Indexes) > 0 { t.Fatalf("indexes %+v", tableDesc.Indexes) } @@ -2497,7 +2498,7 @@ CREATE TABLE t.test (k INT NOT NULL, v INT); } // GC the old indexes to be dropped after the PK change immediately. defer disableGCTTLStrictEnforcement(t, sqlDB)() - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if _, err := addImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { t.Fatal(err) } @@ -2708,7 +2709,7 @@ COMMIT; } // Ensure that t.test doesn't have any pending mutations // after the primary key change. - desc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(desc.Mutations) != 0 { t.Fatalf("expected to find 0 mutations, but found %d", len(desc.Mutations)) } @@ -2903,7 +2904,7 @@ ALTER TABLE t.test ALTER PRIMARY KEY USING COLUMNS (v); } // Wait for the async schema changer to run. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if _, err := addImmediateGCZoneConfig(sqlDB, tableDesc.ID); err != nil { t.Fatal(err) } @@ -2978,7 +2979,7 @@ CREATE TABLE t.test (k INT NOT NULL, v INT); // Ensure that the mutations corresponding to the primary key change are cleaned up and // that the job did not succeed even though it was canceled. testutils.SucceedsSoon(t, func() error { - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Mutations) != 0 { return errors.Errorf("expected 0 mutations after cancellation, found %d", len(tableDesc.Mutations)) } @@ -2990,7 +2991,7 @@ CREATE TABLE t.test (k INT NOT NULL, v INT); // Stop any further attempts at cancellation, so the GC jobs don't fail. shouldCancel = false - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if _, err := addImmediateGCZoneConfig(db, tableDesc.ID); err != nil { t.Fatal(err) } @@ -3042,7 +3043,7 @@ CREATE TABLE t.test (k INT NOT NULL, v INT); <-hasAttemptedCancel sqlRun := sqlutils.MakeSQLRunner(sqlDB) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") testutils.SucceedsSoon(t, func() error { return jobutils.VerifySystemJob(t, sqlRun, 1, jobspb.TypeSchemaChange, jobs.StatusSucceeded, jobs.Record{ Description: "CLEANUP JOB for 'ALTER TABLE t.public.test ALTER PRIMARY KEY USING COLUMNS (k)'", @@ -3184,7 +3185,7 @@ INSERT INTO t.test (k, v, length) VALUES (2, 3, 1); // Wait until both mutations are queued up. 
testutils.SucceedsSoon(t, func() error { - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if l := len(tableDesc.Mutations); l != 3 { return errors.Errorf("number of mutations = %d", l) } @@ -3282,7 +3283,7 @@ INSERT INTO t.test (k, v, length) VALUES (2, 3, 1); t.Fatalf("got: %s\nexpected: %s", create, expect) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if l := len(tableDesc.Mutations); l != 3 { t.Fatalf("number of mutations = %d", l) } @@ -3385,7 +3386,7 @@ func TestBackfillCompletesOnChunkBoundary(t *testing.T) { } // Split the table into multiple ranges. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") var sps []sql.SplitPoint for i := 1; i <= numNodes-1; i++ { sps = append(sps, sql.SplitPoint{TargetNodeIdx: i, Vals: []interface{}{maxValue / numNodes * i}}) @@ -3557,7 +3558,7 @@ CREATE TABLE d.t ( `); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "d", "t") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "d", "t") // Verify that this descriptor uses the new STORING encoding. Overwrite it // with one that uses the old encoding. for i, index := range tableDesc.Indexes { @@ -3573,7 +3574,7 @@ CREATE TABLE d.t ( } if err := kvDB.Put( context.TODO(), - sqlbase.MakeDescMetadataKey(tableDesc.GetID()), + sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc), ); err != nil { t.Fatal(err) @@ -3689,7 +3690,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); } // Split the table into multiple ranges. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") var sps []sql.SplitPoint for i := 1; i <= numNodes-1; i++ { sps = append(sps, sql.SplitPoint{TargetNodeIdx: i, Vals: []interface{}{maxValue / numNodes * i}}) @@ -3852,7 +3853,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL DEFAULT (DECIMAL '3.14 t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Add a zone config. cfg := zonepb.DefaultZoneConfig() @@ -3877,7 +3878,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL DEFAULT (DECIMAL '3.14 t.Fatal(err) } - newTableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + newTableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if newTableDesc.Adding() { t.Fatalf("bad state = %s", newTableDesc.State) } @@ -3898,7 +3899,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL DEFAULT (DECIMAL '3.14 var droppedDesc *sqlbase.TableDescriptor if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error - droppedDesc, err = sqlbase.GetTableDescFromID(ctx, txn, tableDesc.ID) + droppedDesc, err = sqlbase.GetTableDescFromID(ctx, txn, keys.SystemSQLCodec, tableDesc.ID) return err }); err != nil { t.Fatal(err) @@ -3964,7 +3965,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL REFERENCES t.pi (d) DE t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Add a zone config. 
var cfg zonepb.ZoneConfig @@ -4005,7 +4006,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL REFERENCES t.pi (d) DE t.Fatalf("err = %v", err) } - newTableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + newTableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if newTableDesc.Adding() { t.Fatalf("bad state = %s", newTableDesc.State) } @@ -4017,7 +4018,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL REFERENCES t.pi (d) DE testutils.SucceedsSoon(t, func() error { if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error - _, err = sqlbase.GetTableDescFromID(ctx, txn, tableDesc.ID) + _, err = sqlbase.GetTableDescFromID(ctx, txn, keys.SystemSQLCodec, tableDesc.ID) return err }); err != nil { if err == sqlbase.ErrDescriptorNotFound { @@ -4041,7 +4042,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL REFERENCES t.pi (d) DE t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs)) } - fkTableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "pi") + fkTableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "pi") tablePrefix = keys.SystemSQLCodec.TablePrefix(uint32(fkTableDesc.ID)) tableEnd = tablePrefix.PrefixEnd() if kvs, err := kvDB.Scan(ctx, tablePrefix, tableEnd, 0); err != nil { @@ -4129,7 +4130,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); } // Check that an outstanding schema change exists. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") oldID := tableDesc.ID if lenMutations := len(tableDesc.Mutations); lenMutations != 3 { t.Fatalf("%d outstanding schema change", lenMutations) @@ -4148,7 +4149,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); wg.Wait() // The new table is truncated. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID)) tableEnd := tablePrefix.PrefixEnd() if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil { @@ -4439,7 +4440,7 @@ ALTER TABLE t.test ADD COLUMN c INT AS (v + 4) STORED, ADD COLUMN d INT DEFAULT } // The descriptor version hasn't changed. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if tableDesc.Version != 1 { t.Fatalf("invalid version = %d", tableDesc.Version) } @@ -4508,7 +4509,7 @@ func TestCancelSchemaChange(t *testing.T) { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Split the table into multiple ranges. var sps []sql.SplitPoint const numSplits = numNodes * 2 @@ -4630,7 +4631,7 @@ func TestCancelSchemaChange(t *testing.T) { }) // Check that constraints are cleaned up. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if checks := tableDesc.AllActiveAndInactiveChecks(); len(checks) != 1 { t.Fatalf("expected 1 check, found %+v", checks) } @@ -4941,7 +4942,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Bulk insert enough rows to exceed the chunk size. 
inserts := make([]string, maxValue+1) @@ -4959,7 +4960,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Indexes) > 0 || len(tableDesc.Mutations) > 0 { t.Fatalf("descriptor broken %d, %d", len(tableDesc.Indexes), len(tableDesc.Mutations)) } @@ -5010,7 +5011,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v JSON); t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") r := rand.New(rand.NewSource(timeutil.Now().UnixNano())) // Insert enough rows to exceed the chunk size. @@ -5031,7 +5032,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v JSON); t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Indexes) > 0 || len(tableDesc.Mutations) > 0 { t.Fatalf("descriptor broken %d, %d", len(tableDesc.Indexes), len(tableDesc.Mutations)) } @@ -5492,7 +5493,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); `); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if err := bulkInsertIntoTable(sqlDB, maxValue); err != nil { t.Fatal(err) @@ -5599,7 +5600,7 @@ INSERT INTO t.test (k, v) VALUES (1, 99), (2, 100); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") sqlRun := sqlutils.MakeSQLRunner(sqlDB) runBeforeConstraintValidation = func() error { @@ -5727,10 +5728,10 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Wait until indexes are created. for r := retry.Start(retryOpts); r.Next(); { - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.Indexes) == 1 { break } @@ -5740,7 +5741,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); t.Fatal(err) } - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if e := 1; e != len(tableDesc.GCMutations) { t.Fatalf("e = %d, v = %d", e, len(tableDesc.GCMutations)) } @@ -5752,7 +5753,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); } // Ensure the GCMutations has not yet been completed. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if e := 1; e != len(tableDesc.GCMutations) { t.Fatalf("e = %d, v = %d", e, len(tableDesc.GCMutations)) } @@ -5768,7 +5769,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); // Ensure that GC mutations that cannot find their job will eventually be // cleared. 
testutils.SucceedsSoon(t, func() error { - tableDesc = sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") if len(tableDesc.GCMutations) > 0 { return errors.Errorf("%d gc mutations remaining", len(tableDesc.GCMutations)) } @@ -5885,7 +5886,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); INSERT INTO t.test VALUES (1, 2), (2, 2); `) require.NoError(t, err) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Add a zone config for the table. _, err = addImmediateGCZoneConfig(sqlDB, tableDesc.ID) require.NoError(t, err) diff --git a/pkg/sql/scrub.go b/pkg/sql/scrub.go index 8ae9e2daa06d..e0788d13bf76 100644 --- a/pkg/sql/scrub.go +++ b/pkg/sql/scrub.go @@ -164,7 +164,7 @@ func (n *scrubNode) startScrubDatabase(ctx context.Context, p *planner, name *tr var tbNames TableNames for _, schema := range schemas { - toAppend, err := GetObjectNames(ctx, p.txn, p, dbDesc, schema, true /*explicitPrefix*/) + toAppend, err := GetObjectNames(ctx, p.txn, p, p.ExecCfg().Codec, dbDesc, schema, true /*explicitPrefix*/) if err != nil { return err } @@ -174,7 +174,7 @@ func (n *scrubNode) startScrubDatabase(ctx context.Context, p *planner, name *tr for i := range tbNames { tableName := &tbNames[i] objDesc, err := p.LogicalSchemaAccessor().GetObjectDesc(ctx, p.txn, p.ExecCfg().Settings, - tableName, p.ObjectLookupFlags(true /*required*/, false /*requireMutable*/)) + p.ExecCfg().Codec, tableName, p.ObjectLookupFlags(true /*required*/, false /*requireMutable*/)) if err != nil { return err } @@ -410,7 +410,7 @@ func createConstraintCheckOperations( tableName *tree.TableName, asOf hlc.Timestamp, ) (results []checkOperation, err error) { - constraints, err := tableDesc.GetConstraintInfo(ctx, p.txn) + constraints, err := tableDesc.GetConstraintInfo(ctx, p.txn, p.ExecCfg().Codec) if err != nil { return nil, err } diff --git a/pkg/sql/scrub_test.go b/pkg/sql/scrub_test.go index b785b9ecc2ee..8e020aa29234 100644 --- a/pkg/sql/scrub_test.go +++ b/pkg/sql/scrub_test.go @@ -54,7 +54,7 @@ INSERT INTO t."tEst" VALUES (10, 20); // Construct datums for our row values (k, v). 
values := []tree.Datum{tree.NewDInt(10), tree.NewDInt(20)} - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "tEst") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "tEst") secondaryIndex := &tableDesc.Indexes[0] colIDtoRowIndex := make(map[sqlbase.ColumnID]int) @@ -122,7 +122,7 @@ CREATE INDEX secondary ON t.test (v); t.Fatalf("unexpected error: %s", err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") secondaryIndexDesc := &tableDesc.Indexes[0] colIDtoRowIndex := make(map[sqlbase.ColumnID]int) @@ -215,7 +215,7 @@ INSERT INTO t.test VALUES (10, 20, 1337); t.Fatalf("unexpected error: %s", err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") secondaryIndexDesc := &tableDesc.Indexes[0] colIDtoRowIndex := make(map[sqlbase.ColumnID]int) @@ -334,7 +334,7 @@ INSERT INTO t.test VALUES (10, 2); t.Fatalf("unexpected error: %s", err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") colIDtoRowIndex := make(map[sqlbase.ColumnID]int) colIDtoRowIndex[tableDesc.Columns[0].ID] = 0 @@ -432,7 +432,7 @@ func TestScrubFKConstraintFKMissing(t *testing.T) { INSERT INTO t.child VALUES (10, 314); `) - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "child") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "child") // Construct datums for the child row values (child_id, parent_id). values := []tree.Datum{tree.NewDInt(10), tree.NewDInt(314)} @@ -569,7 +569,7 @@ INSERT INTO t.test VALUES (217, 314); t.Fatalf("unexpected error: %s", err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Construct datums for our row values (k, v). values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314)} @@ -650,7 +650,7 @@ INSERT INTO t.test VALUES (217, 314, 1337); t.Fatalf("unexpected error: %s", err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Construct datums for our row values (k, v, b). values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314), tree.NewDInt(1337)} @@ -748,14 +748,14 @@ CREATE TABLE t.test ( t.Fatalf("unexpected error: %s", err) } - oldTableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + oldTableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Drop the first column family. if _, err := db.Exec(`ALTER TABLE t.test DROP COLUMN v1`); err != nil { t.Fatalf("unexpected error: %s", err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Construct datums for our row values (k, v1). values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314)} @@ -858,7 +858,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v1 INT, v2 INT); `); err != nil { t.Fatalf("unexpected error: %s", err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Construct datums for our row values (k, v1, v2). 
values := []tree.Datum{tree.NewDInt(217), tree.NewDInt(314), tree.NewDInt(1337)} diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go index d551853c23df..6444165eec8e 100644 --- a/pkg/sql/sem/builtins/builtins.go +++ b/pkg/sql/sem/builtins/builtins.go @@ -2822,7 +2822,7 @@ may increase either contention or retry errors, or both.`, return nil, pgerror.Newf(pgcode.DatatypeMismatch, "expected tuple argument for row_tuple, found %s", args[2]) } - tableDesc, err := sqlbase.GetTableDescFromID(ctx.Context, ctx.Txn, sqlbase.ID(tableID)) + tableDesc, err := sqlbase.GetTableDescFromID(ctx.Context, ctx.Txn, ctx.Codec, sqlbase.ID(tableID)) if err != nil { return nil, err } @@ -3217,7 +3217,7 @@ may increase either contention or retry errors, or both.`, tableID := int(tree.MustBeDInt(args[0])) indexID := int(tree.MustBeDInt(args[1])) g := tree.MustBeDGeography(args[2]) - tableDesc, err := sqlbase.GetTableDescFromID(ctx.Context, ctx.Txn, sqlbase.ID(tableID)) + tableDesc, err := sqlbase.GetTableDescFromID(ctx.Context, ctx.Txn, ctx.Codec, sqlbase.ID(tableID)) if err != nil { return nil, err } @@ -3250,7 +3250,7 @@ may increase either contention or retry errors, or both.`, tableID := int(tree.MustBeDInt(args[0])) indexID := int(tree.MustBeDInt(args[1])) g := tree.MustBeDGeometry(args[2]) - tableDesc, err := sqlbase.GetTableDescFromID(ctx.Context, ctx.Txn, sqlbase.ID(tableID)) + tableDesc, err := sqlbase.GetTableDescFromID(ctx.Context, ctx.Txn, ctx.Codec, sqlbase.ID(tableID)) if err != nil { return nil, err } diff --git a/pkg/sql/set_zone_config.go b/pkg/sql/set_zone_config.go index c7076083e0cc..3f06ee85c579 100644 --- a/pkg/sql/set_zone_config.go +++ b/pkg/sql/set_zone_config.go @@ -894,7 +894,7 @@ func RemoveIndexZoneConfigs( tableID sqlbase.ID, indexDescs []sqlbase.IndexDescriptor, ) error { - tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, tableID) + tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, execCfg.Codec, tableID) if err != nil { return err } diff --git a/pkg/sql/span_builder_test.go b/pkg/sql/span_builder_test.go index 7943393feeea..e7be6e5b0b26 100644 --- a/pkg/sql/span_builder_test.go +++ b/pkg/sql/span_builder_test.go @@ -15,6 +15,7 @@ import ( "fmt" "testing" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/span" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -97,7 +98,7 @@ func TestSpanBuilderCanSplitSpan(t *testing.T) { if _, err := sqlDB.Exec(sql); err != nil { t.Fatal(err) } - desc := sqlbase.GetTableDescriptor(kvDB, "t", "t") + desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "t") idx, _, err := desc.FindIndexByName(tc.index) if err != nil { t.Fatal(err) diff --git a/pkg/sql/sqlbase/keys.go b/pkg/sql/sqlbase/keys.go index 08d2616e2929..97432fd32539 100644 --- a/pkg/sql/sqlbase/keys.go +++ b/pkg/sql/sqlbase/keys.go @@ -23,8 +23,10 @@ import ( // versions >= 20.1. // Pass name == "" in order to generate the prefix key to use to scan over all // of the names for the specified parentID. 
-func MakeNameMetadataKey(parentID ID, parentSchemaID ID, name string) roachpb.Key { - k := keys.TODOSQLCodec.IndexPrefix(uint32(NamespaceTable.ID), uint32(NamespaceTable.PrimaryIndex.ID)) +func MakeNameMetadataKey( + codec keys.SQLCodec, parentID ID, parentSchemaID ID, name string, +) roachpb.Key { + k := codec.IndexPrefix(uint32(NamespaceTable.ID), uint32(NamespaceTable.PrimaryIndex.ID)) k = encoding.EncodeUvarintAscending(k, uint64(parentID)) k = encoding.EncodeUvarintAscending(k, uint64(parentSchemaID)) if name != "" { @@ -36,8 +38,10 @@ func MakeNameMetadataKey(parentID ID, parentSchemaID ID, name string) roachpb.Ke // DecodeNameMetadataKey returns the components that make up the // NameMetadataKey for version >= 20.1. -func DecodeNameMetadataKey(k roachpb.Key) (parentID ID, parentSchemaID ID, name string, err error) { - k, _, err = keys.TODOSQLCodec.DecodeTablePrefix(k) +func DecodeNameMetadataKey( + codec keys.SQLCodec, k roachpb.Key, +) (parentID ID, parentSchemaID ID, name string, err error) { + k, _, err = codec.DecodeTablePrefix(k) if err != nil { return 0, 0, "", err } @@ -76,8 +80,8 @@ func DecodeNameMetadataKey(k roachpb.Key) (parentID ID, parentSchemaID ID, name // MakeDeprecatedNameMetadataKey returns the key for a name, as expected by // versions < 20.1. Pass name == "" in order to generate the prefix key to use // to scan over all of the names for the specified parentID. -func MakeDeprecatedNameMetadataKey(parentID ID, name string) roachpb.Key { - k := keys.TODOSQLCodec.IndexPrefix( +func MakeDeprecatedNameMetadataKey(codec keys.SQLCodec, parentID ID, name string) roachpb.Key { + k := codec.IndexPrefix( uint32(DeprecatedNamespaceTable.ID), uint32(DeprecatedNamespaceTable.PrimaryIndex.ID)) k = encoding.EncodeUvarintAscending(k, uint64(parentID)) if name != "" { @@ -88,13 +92,13 @@ func MakeDeprecatedNameMetadataKey(parentID ID, name string) roachpb.Key { } // MakeAllDescsMetadataKey returns the key for all descriptors. -func MakeAllDescsMetadataKey() roachpb.Key { - return keys.TODOSQLCodec.DescMetadataPrefix() +func MakeAllDescsMetadataKey(codec keys.SQLCodec) roachpb.Key { + return codec.DescMetadataPrefix() } // MakeDescMetadataKey returns the key for the descriptor. 
-func MakeDescMetadataKey(descID ID) roachpb.Key { - return keys.TODOSQLCodec.DescMetadataKey(uint32(descID)) +func MakeDescMetadataKey(codec keys.SQLCodec, descID ID) roachpb.Key { + return codec.DescMetadataKey(uint32(descID)) } // IndexKeyValDirs returns the corresponding encoding.Directions for all the diff --git a/pkg/sql/sqlbase/keys_test.go b/pkg/sql/sqlbase/keys_test.go index 0c539c5a9a54..4cc41411b23d 100644 --- a/pkg/sql/sqlbase/keys_test.go +++ b/pkg/sql/sqlbase/keys_test.go @@ -20,15 +20,23 @@ import ( func TestKeyAddress(t *testing.T) { defer leaktest.AfterTest(t)() + tenSysCodec := keys.SystemSQLCodec + ten5Codec := keys.MakeSQLCodec(roachpb.MakeTenantID(5)) testCases := []struct { key roachpb.Key }{ - {MakeDescMetadataKey(123)}, - {MakeDescMetadataKey(124)}, - {NewPublicTableKey(0, "BAR").Key()}, - {NewPublicTableKey(1, "BAR").Key()}, - {NewPublicTableKey(1, "foo").Key()}, - {NewPublicTableKey(2, "foo").Key()}, + {MakeDescMetadataKey(tenSysCodec, 123)}, + {MakeDescMetadataKey(tenSysCodec, 124)}, + {NewPublicTableKey(0, "BAR").Key(tenSysCodec)}, + {NewPublicTableKey(1, "BAR").Key(tenSysCodec)}, + {NewPublicTableKey(1, "foo").Key(tenSysCodec)}, + {NewPublicTableKey(2, "foo").Key(tenSysCodec)}, + {MakeDescMetadataKey(ten5Codec, 123)}, + {MakeDescMetadataKey(ten5Codec, 124)}, + {NewPublicTableKey(0, "BAR").Key(ten5Codec)}, + {NewPublicTableKey(1, "BAR").Key(ten5Codec)}, + {NewPublicTableKey(1, "foo").Key(ten5Codec)}, + {NewPublicTableKey(2, "foo").Key(ten5Codec)}, } var lastKey roachpb.Key for i, test := range testCases { diff --git a/pkg/sql/sqlbase/metadata.go b/pkg/sql/sqlbase/metadata.go index e99bb687f74d..2328f7a8fc0e 100644 --- a/pkg/sql/sqlbase/metadata.go +++ b/pkg/sql/sqlbase/metadata.go @@ -31,7 +31,7 @@ var _ DescriptorProto = &TableDescriptor{} // databaseKey and tableKey. It is used to easily get the // descriptor key and plain name. type DescriptorKey interface { - Key() roachpb.Key + Key(codec keys.SQLCodec) roachpb.Key Name() string } @@ -72,7 +72,7 @@ func WrapDescriptor(descriptor DescriptorProto) *Descriptor { // installed on the underlying persistent storage before a cockroach store can // start running correctly, thus requiring this special initialization. type MetadataSchema struct { - // TODO(nvanbenschoten): add roachpb.TenantID here. Use in GetInitialValues. + codec keys.SQLCodec descs []metadataDescriptor otherSplitIDs []uint32 otherKV []roachpb.KeyValue @@ -86,9 +86,11 @@ type metadataDescriptor struct { // MakeMetadataSchema constructs a new MetadataSchema value which constructs // the "system" database. 
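Not part of the patch, but for orientation: a minimal, self-contained sketch of the tenant-aware key construction exercised by the TestKeyAddress change above. It uses only signatures that appear in this diff (keys.MakeSQLCodec, MakeDescMetadataKey, NewPublicTableKey(...).Key); the descriptor and database IDs are arbitrary.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

func main() {
	// System tenant codec: keys address the system tenant's keyspace.
	sysCodec := keys.SystemSQLCodec
	// Secondary tenant codec: generated keys carry tenant 5's prefix.
	ten5Codec := keys.MakeSQLCodec(roachpb.MakeTenantID(5))

	for _, codec := range []keys.SQLCodec{sysCodec, ten5Codec} {
		descKey := sqlbase.MakeDescMetadataKey(codec, 123)         // descriptor metadata key
		nameKey := sqlbase.NewPublicTableKey(52, "foo").Key(codec) // system.namespace entry
		fmt.Printf("desc: %s  name: %s\n", descKey, nameKey)
	}
}

Passing the codec explicitly is what lets the same sqlbase helpers emit keys inside a secondary tenant's prefix rather than assuming the system tenant's keyspace.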
func MakeMetadataSchema( - defaultZoneConfig *zonepb.ZoneConfig, defaultSystemZoneConfig *zonepb.ZoneConfig, + codec keys.SQLCodec, + defaultZoneConfig *zonepb.ZoneConfig, + defaultSystemZoneConfig *zonepb.ZoneConfig, ) MetadataSchema { - ms := MetadataSchema{} + ms := MetadataSchema{codec: codec} addSystemDatabaseToSchema(&ms, defaultZoneConfig, defaultSystemZoneConfig) return ms } @@ -153,7 +155,7 @@ func (ms MetadataSchema) GetInitialValues( if bootstrapVersion.IsActive(clusterversion.VersionNamespaceTableWithSchemas) { if parentID != keys.RootNamespaceID { ret = append(ret, roachpb.KeyValue{ - Key: NewPublicTableKey(parentID, desc.GetName()).Key(), + Key: NewPublicTableKey(parentID, desc.GetName()).Key(ms.codec), Value: value, }) } else { @@ -164,17 +166,17 @@ func (ms MetadataSchema) GetInitialValues( ret = append( ret, roachpb.KeyValue{ - Key: NewDatabaseKey(desc.GetName()).Key(), + Key: NewDatabaseKey(desc.GetName()).Key(ms.codec), Value: value, }, roachpb.KeyValue{ - Key: NewPublicSchemaKey(desc.GetID()).Key(), + Key: NewPublicSchemaKey(desc.GetID()).Key(ms.codec), Value: publicSchemaValue, }) } } else { ret = append(ret, roachpb.KeyValue{ - Key: NewDeprecatedTableKey(parentID, desc.GetName()).Key(), + Key: NewDeprecatedTableKey(parentID, desc.GetName()).Key(ms.codec), Value: value, }) } @@ -186,11 +188,11 @@ func (ms MetadataSchema) GetInitialValues( log.Fatalf(context.TODO(), "could not marshal %v", desc) } ret = append(ret, roachpb.KeyValue{ - Key: MakeDescMetadataKey(desc.GetID()), + Key: MakeDescMetadataKey(ms.codec, desc.GetID()), Value: value, }) if desc.GetID() > keys.MaxSystemConfigDescID { - splits = append(splits, roachpb.RKey(keys.TODOSQLCodec.TablePrefix(uint32(desc.GetID())))) + splits = append(splits, roachpb.RKey(ms.codec.TablePrefix(uint32(desc.GetID())))) } } @@ -201,7 +203,7 @@ func (ms MetadataSchema) GetInitialValues( } for _, id := range ms.otherSplitIDs { - splits = append(splits, roachpb.RKey(keys.TODOSQLCodec.TablePrefix(id))) + splits = append(splits, roachpb.RKey(ms.codec.TablePrefix(id))) } // Other key/value generation that doesn't fit into databases and diff --git a/pkg/sql/sqlbase/namespace.go b/pkg/sql/sqlbase/namespace.go index 2fb074cfacdb..ae01b9bd0735 100644 --- a/pkg/sql/sqlbase/namespace.go +++ b/pkg/sql/sqlbase/namespace.go @@ -59,7 +59,13 @@ import ( // RemoveObjectNamespaceEntry removes entries from both the deprecated and // new system.namespace table (if one exists). func RemoveObjectNamespaceEntry( - ctx context.Context, txn *kv.Txn, parentID ID, parentSchemaID ID, name string, KVTrace bool, + ctx context.Context, + txn *kv.Txn, + codec keys.SQLCodec, + parentID ID, + parentSchemaID ID, + name string, + KVTrace bool, ) error { b := txn.NewBatch() var toDelete []DescriptorKey @@ -81,7 +87,7 @@ func RemoveObjectNamespaceEntry( if KVTrace { log.VEventf(ctx, 2, "Del %s", delKey) } - b.Del(delKey.Key()) + b.Del(delKey.Key(codec)) } return txn.Run(ctx, b) } @@ -89,23 +95,25 @@ func RemoveObjectNamespaceEntry( // RemovePublicTableNamespaceEntry is a wrapper around RemoveObjectNamespaceEntry // for public tables. 
func RemovePublicTableNamespaceEntry( - ctx context.Context, txn *kv.Txn, parentID ID, name string, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, parentID ID, name string, ) error { - return RemoveObjectNamespaceEntry(ctx, txn, parentID, keys.PublicSchemaID, name, false /* KVTrace */) + return RemoveObjectNamespaceEntry(ctx, txn, codec, parentID, keys.PublicSchemaID, name, false /* KVTrace */) } // RemoveSchemaNamespaceEntry is a wrapper around RemoveObjectNamespaceEntry // for schemas. -func RemoveSchemaNamespaceEntry(ctx context.Context, txn *kv.Txn, parentID ID, name string) error { - return RemoveObjectNamespaceEntry(ctx, txn, parentID, keys.RootNamespaceID, name, false /* KVTrace */) +func RemoveSchemaNamespaceEntry( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, parentID ID, name string, +) error { + return RemoveObjectNamespaceEntry(ctx, txn, codec, parentID, keys.RootNamespaceID, name, false /* KVTrace */) } // RemoveDatabaseNamespaceEntry is a wrapper around RemoveObjectNamespaceEntry // for databases. func RemoveDatabaseNamespaceEntry( - ctx context.Context, txn *kv.Txn, name string, KVTrace bool, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, name string, KVTrace bool, ) error { - return RemoveObjectNamespaceEntry(ctx, txn, keys.RootNamespaceID, keys.RootNamespaceID, name, KVTrace) + return RemoveObjectNamespaceEntry(ctx, txn, codec, keys.RootNamespaceID, keys.RootNamespaceID, name, KVTrace) } // MakeObjectNameKey returns a key in the system.namespace table for @@ -149,7 +157,12 @@ func MakeDatabaseNameKey( // (parentID, parentSchemaID, name) supplied. If cluster version < 20.1, // the parentSchemaID is ignored. func LookupObjectID( - ctx context.Context, txn *kv.Txn, parentID ID, parentSchemaID ID, name string, + ctx context.Context, + txn *kv.Txn, + codec keys.SQLCodec, + parentID ID, + parentSchemaID ID, + name string, ) (bool, ID, error) { var key DescriptorKey if parentID == keys.RootNamespaceID { @@ -159,8 +172,8 @@ func LookupObjectID( } else { key = NewTableKey(parentID, parentSchemaID, name) } - log.Eventf(ctx, "looking up descriptor ID for name key %q", key.Key()) - res, err := txn.Get(ctx, key.Key()) + log.Eventf(ctx, "looking up descriptor ID for name key %q", key.Key(codec)) + res, err := txn.Get(ctx, key.Key(codec)) if err != nil { return false, InvalidID, err } @@ -190,8 +203,8 @@ func LookupObjectID( } else { dKey = NewDeprecatedTableKey(parentID, name) } - log.Eventf(ctx, "looking up descriptor ID for name key %q", dKey.Key()) - res, err = txn.Get(ctx, dKey.Key()) + log.Eventf(ctx, "looking up descriptor ID for name key %q", dKey.Key(codec)) + res, err = txn.Get(ctx, dKey.Key(codec)) if err != nil { return false, InvalidID, err } @@ -203,12 +216,14 @@ func LookupObjectID( // LookupPublicTableID is a wrapper around LookupObjectID for public tables. func LookupPublicTableID( - ctx context.Context, txn *kv.Txn, parentID ID, name string, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, parentID ID, name string, ) (bool, ID, error) { - return LookupObjectID(ctx, txn, parentID, keys.PublicSchemaID, name) + return LookupObjectID(ctx, txn, codec, parentID, keys.PublicSchemaID, name) } // LookupDatabaseID is a wrapper around LookupObjectID for databases. 
-func LookupDatabaseID(ctx context.Context, txn *kv.Txn, name string) (bool, ID, error) { - return LookupObjectID(ctx, txn, keys.RootNamespaceID, keys.RootNamespaceID, name) +func LookupDatabaseID( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, name string, +) (bool, ID, error) { + return LookupObjectID(ctx, txn, codec, keys.RootNamespaceID, keys.RootNamespaceID, name) } diff --git a/pkg/sql/sqlbase/structured.go b/pkg/sql/sqlbase/structured.go index 225064233949..7714561c58e2 100644 --- a/pkg/sql/sqlbase/structured.go +++ b/pkg/sql/sqlbase/structured.go @@ -335,10 +335,10 @@ type protoGetter interface { // ID passed in using an existing proto getter. Returns an error if the // descriptor doesn't exist or if it exists and is not a database. func GetDatabaseDescFromID( - ctx context.Context, protoGetter protoGetter, id ID, + ctx context.Context, protoGetter protoGetter, codec keys.SQLCodec, id ID, ) (*DatabaseDescriptor, error) { desc := &Descriptor{} - descKey := MakeDescMetadataKey(id) + descKey := MakeDescMetadataKey(codec, id) _, err := protoGetter.GetProtoTs(ctx, descKey, desc) if err != nil { return nil, err @@ -356,14 +356,14 @@ func GetDatabaseDescFromID( // NB: If this function changes, make sure to update GetTableDescFromIDWithFKsChanged // in a similar way. func GetTableDescFromID( - ctx context.Context, protoGetter protoGetter, id ID, + ctx context.Context, protoGetter protoGetter, codec keys.SQLCodec, id ID, ) (*TableDescriptor, error) { - table, err := getTableDescFromIDRaw(ctx, protoGetter, id) + table, err := getTableDescFromIDRaw(ctx, protoGetter, codec, id) if err != nil { return nil, err } - if err := table.MaybeFillInDescriptor(ctx, protoGetter); err != nil { + if err := table.MaybeFillInDescriptor(ctx, protoGetter, codec); err != nil { return nil, err } @@ -375,15 +375,15 @@ func GetTableDescFromID( // GetTableDescFromID but additionally returns whether or not the table descriptor // was changed during the foreign key upgrade process. func GetTableDescFromIDWithFKsChanged( - ctx context.Context, protoGetter protoGetter, id ID, + ctx context.Context, protoGetter protoGetter, codec keys.SQLCodec, id ID, ) (*TableDescriptor, bool, error) { - table, err := getTableDescFromIDRaw(ctx, protoGetter, id) + table, err := getTableDescFromIDRaw(ctx, protoGetter, codec, id) if err != nil { return nil, false, err } table.maybeUpgradeFormatVersion() table.Privileges.MaybeFixPrivileges(table.ID) - changed, err := table.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, false /* skipFKsWithNoMatchingTable */) + changed, err := table.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, codec, false /* skipFKsWithNoMatchingTable */) if err != nil { return nil, false, err } @@ -397,10 +397,10 @@ func GetTableDescFromIDWithFKsChanged( // migrations and is *required* before ordinary presentation to other code. This // method is for internal use only and shouldn't get exposed. func getTableDescFromIDRaw( - ctx context.Context, protoGetter protoGetter, id ID, + ctx context.Context, protoGetter protoGetter, codec keys.SQLCodec, id ID, ) (*TableDescriptor, error) { desc := &Descriptor{} - descKey := MakeDescMetadataKey(id) + descKey := MakeDescMetadataKey(codec, id) ts, err := protoGetter.GetProtoTs(ctx, descKey, desc) if err != nil { return nil, err @@ -417,9 +417,9 @@ func getTableDescFromIDRaw( // descriptor doesn't exist or if it exists and is not a table. // Otherwise a mutable copy of the table is returned. 
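As a usage illustration only (hypothetical helper, not in the patch): resolving a table ID through the new namespace signatures, which now take the codec explicitly alongside the transaction.

package sqlhelpers // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// LookupTableIDOrErr resolves a public table's descriptor ID inside txn. The
// codec decides which tenant's system.namespace keys are consulted.
func LookupTableIDOrErr(
	ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbID sqlbase.ID, name string,
) (sqlbase.ID, error) {
	found, id, err := sqlbase.LookupPublicTableID(ctx, txn, codec, dbID, name)
	if err != nil {
		return sqlbase.InvalidID, err
	}
	if !found {
		return sqlbase.InvalidID, fmt.Errorf("table %q not found", name)
	}
	return id, nil
}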
func GetMutableTableDescFromID( - ctx context.Context, protoGetter protoGetter, id ID, + ctx context.Context, protoGetter protoGetter, codec keys.SQLCodec, id ID, ) (*MutableTableDescriptor, error) { - table, err := GetTableDescFromID(ctx, protoGetter, id) + table, err := GetTableDescFromID(ctx, protoGetter, codec, id) if err != nil { return nil, err } @@ -885,12 +885,12 @@ func generatedFamilyName(familyID FamilyID, columnNames []string) string { // NB: If this function changes, make sure to update GetTableDescFromIDWithFKsChanged // in a similar way. func (desc *TableDescriptor) MaybeFillInDescriptor( - ctx context.Context, protoGetter protoGetter, + ctx context.Context, protoGetter protoGetter, codec keys.SQLCodec, ) error { desc.maybeUpgradeFormatVersion() desc.Privileges.MaybeFixPrivileges(desc.ID) if protoGetter != nil { - if _, err := desc.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, false /* skipFKsWithNoMatchingTable*/); err != nil { + if _, err := desc.MaybeUpgradeForeignKeyRepresentation(ctx, protoGetter, codec, false /* skipFKsWithNoMatchingTable*/); err != nil { return err } } @@ -941,7 +941,10 @@ func (m MapProtoGetter) GetProtoTs( // is dropped from the table and no error is returned. // TODO(lucy): Write tests for when skipFKsWithNoMatchingTable is true. func (desc *TableDescriptor) MaybeUpgradeForeignKeyRepresentation( - ctx context.Context, protoGetter protoGetter, skipFKsWithNoMatchingTable bool, + ctx context.Context, + protoGetter protoGetter, + codec keys.SQLCodec, + skipFKsWithNoMatchingTable bool, ) (bool, error) { if desc.Dropped() { // If the table has been dropped, it's permitted to have corrupted foreign @@ -954,7 +957,7 @@ func (desc *TableDescriptor) MaybeUpgradeForeignKeyRepresentation( // cluster (after finalizing the upgrade) have foreign key mutations. for i := range desc.Indexes { newChanged, err := maybeUpgradeForeignKeyRepOnIndex( - ctx, protoGetter, otherUnupgradedTables, desc, &desc.Indexes[i], skipFKsWithNoMatchingTable, + ctx, protoGetter, codec, otherUnupgradedTables, desc, &desc.Indexes[i], skipFKsWithNoMatchingTable, ) if err != nil { return false, err @@ -962,7 +965,7 @@ func (desc *TableDescriptor) MaybeUpgradeForeignKeyRepresentation( changed = changed || newChanged } newChanged, err := maybeUpgradeForeignKeyRepOnIndex( - ctx, protoGetter, otherUnupgradedTables, desc, &desc.PrimaryIndex, skipFKsWithNoMatchingTable, + ctx, protoGetter, codec, otherUnupgradedTables, desc, &desc.PrimaryIndex, skipFKsWithNoMatchingTable, ) if err != nil { return false, err @@ -977,6 +980,7 @@ func (desc *TableDescriptor) MaybeUpgradeForeignKeyRepresentation( func maybeUpgradeForeignKeyRepOnIndex( ctx context.Context, protoGetter protoGetter, + codec keys.SQLCodec, otherUnupgradedTables map[ID]*TableDescriptor, desc *TableDescriptor, idx *IndexDescriptor, @@ -986,7 +990,7 @@ func maybeUpgradeForeignKeyRepOnIndex( if idx.ForeignKey.IsSet() { ref := &idx.ForeignKey if _, ok := otherUnupgradedTables[ref.Table]; !ok { - tbl, err := getTableDescFromIDRaw(ctx, protoGetter, ref.Table) + tbl, err := getTableDescFromIDRaw(ctx, protoGetter, codec, ref.Table) if err != nil { if err == ErrDescriptorNotFound && skipFKsWithNoMatchingTable { // Ignore this FK and keep going. 
@@ -1025,7 +1029,7 @@ func maybeUpgradeForeignKeyRepOnIndex( for refIdx := range idx.ReferencedBy { ref := &(idx.ReferencedBy[refIdx]) if _, ok := otherUnupgradedTables[ref.Table]; !ok { - tbl, err := getTableDescFromIDRaw(ctx, protoGetter, ref.Table) + tbl, err := getTableDescFromIDRaw(ctx, protoGetter, codec, ref.Table) if err != nil { if err == ErrDescriptorNotFound && skipFKsWithNoMatchingTable { // Ignore this FK and keep going. @@ -1563,7 +1567,7 @@ func (desc *MutableTableDescriptor) MaybeIncrementVersion( // Validate validates that the table descriptor is well formed. Checks include // both single table and cross table invariants. -func (desc *TableDescriptor) Validate(ctx context.Context, txn *kv.Txn) error { +func (desc *TableDescriptor) Validate(ctx context.Context, txn *kv.Txn, codec keys.SQLCodec) error { err := desc.ValidateTable() if err != nil { return err @@ -1571,15 +1575,17 @@ func (desc *TableDescriptor) Validate(ctx context.Context, txn *kv.Txn) error { if desc.Dropped() { return nil } - return desc.validateCrossReferences(ctx, txn) + return desc.validateCrossReferences(ctx, txn, codec) } // validateCrossReferences validates that each reference to another table is // resolvable and that the necessary back references exist. -func (desc *TableDescriptor) validateCrossReferences(ctx context.Context, txn *kv.Txn) error { +func (desc *TableDescriptor) validateCrossReferences( + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, +) error { // Check that parent DB exists. { - res, err := txn.Get(ctx, MakeDescMetadataKey(desc.ParentID)) + res, err := txn.Get(ctx, MakeDescMetadataKey(codec, desc.ParentID)) if err != nil { return err } @@ -1593,7 +1599,7 @@ func (desc *TableDescriptor) validateCrossReferences(ctx context.Context, txn *k if table, ok := tablesByID[id]; ok { return table, nil } - table, err := GetTableDescFromID(ctx, txn, id) + table, err := GetTableDescFromID(ctx, txn, codec, id) if err != nil { return nil, err } @@ -4076,8 +4082,8 @@ func NewDatabaseKey(name string) DatabaseKey { } // Key implements DescriptorKey interface. -func (dk DatabaseKey) Key() roachpb.Key { - return MakeNameMetadataKey(keys.RootNamespaceID, keys.RootNamespaceID, dk.name) +func (dk DatabaseKey) Key(codec keys.SQLCodec) roachpb.Key { + return MakeNameMetadataKey(codec, keys.RootNamespaceID, keys.RootNamespaceID, dk.name) } // Name implements DescriptorKey interface. @@ -4103,8 +4109,8 @@ func NewTableKey(parentID ID, parentSchemaID ID, name string) TableKey { } // Key implements DescriptorKey interface. -func (tk TableKey) Key() roachpb.Key { - return MakeNameMetadataKey(tk.parentID, tk.parentSchemaID, tk.name) +func (tk TableKey) Key(codec keys.SQLCodec) roachpb.Key { + return MakeNameMetadataKey(codec, tk.parentID, tk.parentSchemaID, tk.name) } // Name implements DescriptorKey interface. @@ -4129,8 +4135,8 @@ func NewPublicSchemaKey(parentID ID) SchemaKey { } // Key implements DescriptorKey interface. -func (sk SchemaKey) Key() roachpb.Key { - return MakeNameMetadataKey(sk.parentID, keys.RootNamespaceID, sk.name) +func (sk SchemaKey) Key(codec keys.SQLCodec) roachpb.Key { + return MakeNameMetadataKey(codec, sk.parentID, keys.RootNamespaceID, sk.name) } // Name implements DescriptorKey interface. @@ -4150,8 +4156,8 @@ func NewDeprecatedTableKey(parentID ID, name string) DeprecatedTableKey { } // Key implements DescriptorKey interface. 
-func (dtk DeprecatedTableKey) Key() roachpb.Key { - return MakeDeprecatedNameMetadataKey(dtk.parentID, dtk.name) +func (dtk DeprecatedTableKey) Key(codec keys.SQLCodec) roachpb.Key { + return MakeDeprecatedNameMetadataKey(codec, dtk.parentID, dtk.name) } // Name implements DescriptorKey interface. @@ -4170,8 +4176,8 @@ func NewDeprecatedDatabaseKey(name string) DeprecatedDatabaseKey { } // Key implements DescriptorKey interface. -func (ddk DeprecatedDatabaseKey) Key() roachpb.Key { - return MakeDeprecatedNameMetadataKey(keys.RootNamespaceID, ddk.name) +func (ddk DeprecatedDatabaseKey) Key(codec keys.SQLCodec) roachpb.Key { + return MakeDeprecatedNameMetadataKey(codec, keys.RootNamespaceID, ddk.name) } // Name implements DescriptorKey interface. diff --git a/pkg/sql/sqlbase/structured_test.go b/pkg/sql/sqlbase/structured_test.go index aff0564c3dab..b9c97fdd2612 100644 --- a/pkg/sql/sqlbase/structured_test.go +++ b/pkg/sql/sqlbase/structured_test.go @@ -840,7 +840,7 @@ func TestValidateCrossTableReferences(t *testing.T) { if err := v.SetProto(desc); err != nil { t.Fatal(err) } - if err := kvDB.Put(ctx, MakeDescMetadataKey(0), &v); err != nil { + if err := kvDB.Put(ctx, MakeDescMetadataKey(keys.SystemSQLCodec, 0), &v); err != nil { t.Fatal(err) } } @@ -853,18 +853,18 @@ func TestValidateCrossTableReferences(t *testing.T) { if err := v.SetProto(desc); err != nil { t.Fatal(err) } - if err := kvDB.Put(ctx, MakeDescMetadataKey(otherDesc.ID), &v); err != nil { + if err := kvDB.Put(ctx, MakeDescMetadataKey(keys.SystemSQLCodec, otherDesc.ID), &v); err != nil { t.Fatal(err) } } txn := kv.NewTxn(ctx, kvDB, s.NodeID()) - if err := test.desc.validateCrossReferences(ctx, txn); err == nil { + if err := test.desc.validateCrossReferences(ctx, txn, keys.SystemSQLCodec); err == nil { t.Errorf("%d: expected \"%s\", but found success: %+v", i, test.err, test.desc) } else if test.err != err.Error() && "internal error: "+test.err != err.Error() { t.Errorf("%d: expected \"%s\", but found \"%s\"", i, test.err, err.Error()) } for _, otherDesc := range test.otherDescs { - if err := kvDB.Del(ctx, MakeDescMetadataKey(otherDesc.ID)); err != nil { + if err := kvDB.Del(ctx, MakeDescMetadataKey(keys.SystemSQLCodec, otherDesc.ID)); err != nil { t.Fatal(err) } } diff --git a/pkg/sql/sqlbase/system.go b/pkg/sql/sqlbase/system.go index fad23e0c3932..2a021c6ac9b9 100644 --- a/pkg/sql/sqlbase/system.go +++ b/pkg/sql/sqlbase/system.go @@ -1457,13 +1457,15 @@ var ( ) // Create a kv pair for the zone config for the given key and config value. -func createZoneConfigKV(keyID int, zoneConfig *zonepb.ZoneConfig) roachpb.KeyValue { +func createZoneConfigKV( + keyID int, codec keys.SQLCodec, zoneConfig *zonepb.ZoneConfig, +) roachpb.KeyValue { value := roachpb.Value{} if err := value.SetProto(zoneConfig); err != nil { panic(fmt.Sprintf("could not marshal ZoneConfig for ID: %d: %s", keyID, err)) } return roachpb.KeyValue{ - Key: keys.TODOSQLCodec.ZoneKey(uint32(keyID)), + Key: codec.ZoneKey(uint32(keyID)), Value: value, } } @@ -1529,7 +1531,8 @@ func addSystemDatabaseToSchema( // and also created as a migration for older cluster. The includedInBootstrap // field should be set on the migration. 
- target.otherKV = append(target.otherKV, createZoneConfigKV(keys.RootNamespaceID, defaultZoneConfig)) + target.otherKV = append(target.otherKV, + createZoneConfigKV(keys.RootNamespaceID, target.codec, defaultZoneConfig)) systemZoneConf := defaultSystemZoneConfig metaRangeZoneConf := protoutil.Clone(defaultSystemZoneConfig).(*zonepb.ZoneConfig) @@ -1537,7 +1540,8 @@ func addSystemDatabaseToSchema( // .meta zone config entry with a shorter GC time. metaRangeZoneConf.GC.TTLSeconds = 60 * 60 // 1h - target.otherKV = append(target.otherKV, createZoneConfigKV(keys.MetaRangesID, metaRangeZoneConf)) + target.otherKV = append(target.otherKV, + createZoneConfigKV(keys.MetaRangesID, target.codec, metaRangeZoneConf)) // Some reporting tables have shorter GC times. replicationConstraintStatsZoneConf := &zonepb.ZoneConfig{ @@ -1549,13 +1553,16 @@ func addSystemDatabaseToSchema( // Liveness zone config entry with a shorter GC time. livenessZoneConf.GC.TTLSeconds = 10 * 60 // 10m - target.otherKV = append(target.otherKV, createZoneConfigKV(keys.LivenessRangesID, livenessZoneConf)) - target.otherKV = append(target.otherKV, createZoneConfigKV(keys.SystemRangesID, systemZoneConf)) - target.otherKV = append(target.otherKV, createZoneConfigKV(keys.SystemDatabaseID, systemZoneConf)) target.otherKV = append(target.otherKV, - createZoneConfigKV(keys.ReplicationConstraintStatsTableID, replicationConstraintStatsZoneConf)) + createZoneConfigKV(keys.LivenessRangesID, target.codec, livenessZoneConf)) + target.otherKV = append(target.otherKV, + createZoneConfigKV(keys.SystemRangesID, target.codec, systemZoneConf)) + target.otherKV = append(target.otherKV, + createZoneConfigKV(keys.SystemDatabaseID, target.codec, systemZoneConf)) + target.otherKV = append(target.otherKV, + createZoneConfigKV(keys.ReplicationConstraintStatsTableID, target.codec, replicationConstraintStatsZoneConf)) target.otherKV = append(target.otherKV, - createZoneConfigKV(keys.ReplicationStatsTableID, replicationStatsZoneConf)) + createZoneConfigKV(keys.ReplicationStatsTableID, target.codec, replicationStatsZoneConf)) } // IsSystemConfigID returns whether this ID is for a system config object. diff --git a/pkg/sql/sqlbase/table.go b/pkg/sql/sqlbase/table.go index 6e6827c39b92..0b35d1e43c8e 100644 --- a/pkg/sql/sqlbase/table.go +++ b/pkg/sql/sqlbase/table.go @@ -16,6 +16,7 @@ import ( "sort" "strings" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -337,12 +338,12 @@ type tableLookupFn func(ID) (*TableDescriptor, error) // GetConstraintInfo returns a summary of all constraints on the table. func (desc *TableDescriptor) GetConstraintInfo( - ctx context.Context, txn *kv.Txn, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, ) (map[string]ConstraintDetail, error) { var tableLookup tableLookupFn if txn != nil { tableLookup = func(id ID) (*TableDescriptor, error) { - return GetTableDescFromID(ctx, txn, id) + return GetTableDescFromID(ctx, txn, codec, id) } } return desc.collectConstraintInfo(tableLookup) @@ -578,9 +579,9 @@ func FindFKOriginIndexInTxn( // because the marshaling is not guaranteed to be stable and also because it's // sensitive to things like missing vs default values of fields. 
func ConditionalGetTableDescFromTxn( - ctx context.Context, txn *kv.Txn, expectation *TableDescriptor, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, expectation *TableDescriptor, ) (*roachpb.Value, error) { - key := MakeDescMetadataKey(expectation.ID) + key := MakeDescMetadataKey(codec, expectation.ID) existingKV, err := txn.Get(ctx, key) if err != nil { return nil, err diff --git a/pkg/sql/sqlbase/table_test.go b/pkg/sql/sqlbase/table_test.go index 7c0ee8f91a90..efc11e946c6f 100644 --- a/pkg/sql/sqlbase/table_test.go +++ b/pkg/sql/sqlbase/table_test.go @@ -881,9 +881,9 @@ func TestAdjustStartKeyForInterleave(t *testing.T) { // parent (pid1) // child (pid1, cid1, cid2) // grandchild (pid1, cid1, cid2, gcid1) - parent := GetTableDescriptor(kvDB, sqlutils.TestDB, "parent1") - child := GetTableDescriptor(kvDB, sqlutils.TestDB, "child1") - grandchild := GetTableDescriptor(kvDB, sqlutils.TestDB, "grandchild1") + parent := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "parent1") + child := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "child1") + grandchild := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "grandchild1") parentDescIdx := parent.Indexes[0] childDescIdx := child.Indexes[0] @@ -1093,9 +1093,9 @@ func TestAdjustEndKeyForInterleave(t *testing.T) { // parent (pid1) // child (pid1, cid1, cid2) // grandchild (pid1, cid1, cid2, gcid1) - parent := GetTableDescriptor(kvDB, sqlutils.TestDB, "parent1") - child := GetTableDescriptor(kvDB, sqlutils.TestDB, "child1") - grandchild := GetTableDescriptor(kvDB, sqlutils.TestDB, "grandchild1") + parent := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "parent1") + child := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "child1") + grandchild := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, "grandchild1") parentDescIdx := parent.Indexes[0] childDescIdx := child.Indexes[0] diff --git a/pkg/sql/sqlbase/testutils.go b/pkg/sql/sqlbase/testutils.go index 3b9c90d08c79..f9da92241eed 100644 --- a/pkg/sql/sqlbase/testutils.go +++ b/pkg/sql/sqlbase/testutils.go @@ -49,12 +49,14 @@ import ( // This file contains utility functions for tests (in other packages). // GetTableDescriptor retrieves a table descriptor directly from the KV layer. -func GetTableDescriptor(kvDB *kv.DB, database string, table string) *TableDescriptor { +func GetTableDescriptor( + kvDB *kv.DB, codec keys.SQLCodec, database string, table string, +) *TableDescriptor { // log.VEventf(context.TODO(), 2, "GetTableDescriptor %q %q", database, table) // testutil, so we pass settings as nil for both database and table name keys. 
dKey := NewDatabaseKey(database) ctx := context.TODO() - gr, err := kvDB.Get(ctx, dKey.Key()) + gr, err := kvDB.Get(ctx, dKey.Key(codec)) if err != nil { panic(err) } @@ -64,7 +66,7 @@ func GetTableDescriptor(kvDB *kv.DB, database string, table string) *TableDescri dbDescID := ID(gr.ValueInt()) tKey := NewPublicTableKey(dbDescID, table) - gr, err = kvDB.Get(ctx, tKey.Key()) + gr, err = kvDB.Get(ctx, tKey.Key(codec)) if err != nil { panic(err) } @@ -72,7 +74,7 @@ func GetTableDescriptor(kvDB *kv.DB, database string, table string) *TableDescri panic("table missing") } - descKey := MakeDescMetadataKey(ID(gr.ValueInt())) + descKey := MakeDescMetadataKey(codec, ID(gr.ValueInt())) desc := &Descriptor{} ts, err := kvDB.GetProtoTs(ctx, descKey, desc) if err != nil || (*desc == Descriptor{}) { @@ -82,7 +84,7 @@ func GetTableDescriptor(kvDB *kv.DB, database string, table string) *TableDescri if tableDesc == nil { return nil } - err = tableDesc.MaybeFillInDescriptor(ctx, kvDB) + err = tableDesc.MaybeFillInDescriptor(ctx, kvDB, codec) if err != nil { log.Fatalf(ctx, "failure to fill in descriptor. err: %v", err) } @@ -91,9 +93,9 @@ func GetTableDescriptor(kvDB *kv.DB, database string, table string) *TableDescri // GetImmutableTableDescriptor retrieves an immutable table descriptor directly from the KV layer. func GetImmutableTableDescriptor( - kvDB *kv.DB, database string, table string, + kvDB *kv.DB, codec keys.SQLCodec, database string, table string, ) *ImmutableTableDescriptor { - return NewImmutableTableDescriptor(*GetTableDescriptor(kvDB, database, table)) + return NewImmutableTableDescriptor(*GetTableDescriptor(kvDB, codec, database, table)) } // RandDatum generates a random Datum of the given type. diff --git a/pkg/sql/sqlbase/utils_test.go b/pkg/sql/sqlbase/utils_test.go index c4b3db8e9ad6..5e7eb5b74ef1 100644 --- a/pkg/sql/sqlbase/utils_test.go +++ b/pkg/sql/sqlbase/utils_test.go @@ -61,7 +61,7 @@ func EncodeTestKey(tb testing.TB, kvDB *kv.DB, codec keys.SQLCodec, keyStr strin // Encode the table ID if the token is a table name. if tableNames[tok] { - desc := GetTableDescriptor(kvDB, sqlutils.TestDB, tok) + desc := GetTableDescriptor(kvDB, keys.SystemSQLCodec, sqlutils.TestDB, tok) key = encoding.EncodeUvarintAscending(key, uint64(desc.ID)) continue } diff --git a/pkg/sql/stats/automatic_stats_test.go b/pkg/sql/stats/automatic_stats_test.go index 9168a29fdde1..29867a19fb82 100644 --- a/pkg/sql/stats/automatic_stats_test.go +++ b/pkg/sql/stats/automatic_stats_test.go @@ -19,6 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -53,7 +54,7 @@ func TestMaybeRefreshStats(t *testing.T) { CREATE VIEW t.vw AS SELECT k, k+1 FROM t.a;`) executor := s.InternalExecutor().(sqlutil.InternalExecutor) - descA := sqlbase.GetTableDescriptor(s.DB(), "t", "a") + descA := sqlbase.GetTableDescriptor(s.DB(), keys.SystemSQLCodec, "t", "a") cache := NewTableStatisticsCache(10 /* cacheSize */, s.GossipI().(*gossip.Gossip), kvDB, executor) refresher := MakeRefresher(st, executor, cache, time.Microsecond /* asOfTime */) @@ -92,7 +93,7 @@ func TestMaybeRefreshStats(t *testing.T) { // Ensure that attempt to refresh stats on view does not result in re- // enqueuing the attempt. // TODO(rytaft): Should not enqueue views to begin with. 
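A hypothetical helper (not part of the patch) sketching the updated descriptor-read pattern with an explicit codec; it assumes only the GetTableDescFromID signature and the kv.DB.Txn closure form shown elsewhere in this diff.

package sqlhelpers // hypothetical package, for illustration only

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

// ReadTableDesc fetches a table descriptor by ID inside a fresh transaction,
// building the descriptor key through the caller's codec.
func ReadTableDesc(
	ctx context.Context, db *kv.DB, codec keys.SQLCodec, id sqlbase.ID,
) (*sqlbase.TableDescriptor, error) {
	var desc *sqlbase.TableDescriptor
	err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		var innerErr error
		desc, innerErr = sqlbase.GetTableDescFromID(ctx, txn, codec, id)
		return innerErr
	})
	return desc, err
}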
- descVW := sqlbase.GetTableDescriptor(s.DB(), "t", "vw") + descVW := sqlbase.GetTableDescriptor(s.DB(), keys.SystemSQLCodec, "t", "vw") refresher.maybeRefreshStats( ctx, s.Stopper(), descVW.ID, 0 /* rowsAffected */, time.Microsecond, /* asOf */ ) @@ -123,7 +124,7 @@ func TestAverageRefreshTime(t *testing.T) { INSERT INTO t.a VALUES (1);`) executor := s.InternalExecutor().(sqlutil.InternalExecutor) - tableID := sqlbase.GetTableDescriptor(s.DB(), "t", "a").ID + tableID := sqlbase.GetTableDescriptor(s.DB(), keys.SystemSQLCodec, "t", "a").ID cache := NewTableStatisticsCache(10 /* cacheSize */, s.GossipI().(*gossip.Gossip), kvDB, executor) refresher := MakeRefresher(st, executor, cache, time.Microsecond /* asOfTime */) diff --git a/pkg/sql/stats/gossip_invalidation_test.go b/pkg/sql/stats/gossip_invalidation_test.go index 74b959357ab6..18980b00492d 100644 --- a/pkg/sql/stats/gossip_invalidation_test.go +++ b/pkg/sql/stats/gossip_invalidation_test.go @@ -17,6 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/sql/stats" @@ -47,7 +48,7 @@ func TestGossipInvalidation(t *testing.T) { sr0.Exec(t, "CREATE TABLE test.t (k INT PRIMARY KEY, v INT)") sr0.Exec(t, "INSERT INTO test.t VALUES (1, 1), (2, 2), (3, 3)") - tableDesc := sqlbase.GetTableDescriptor(tc.Server(0).DB(), "test", "t") + tableDesc := sqlbase.GetTableDescriptor(tc.Server(0).DB(), keys.SystemSQLCodec, "test", "t") tableID := tableDesc.ID expectNStats := func(n int) error { diff --git a/pkg/sql/table.go b/pkg/sql/table.go index be86ee32f079..de468885b33f 100644 --- a/pkg/sql/table.go +++ b/pkg/sql/table.go @@ -234,7 +234,7 @@ func (tc *TableCollection) getMutableTableDescriptor( } phyAccessor := UncachedPhysicalAccessor{} - obj, err := phyAccessor.GetObjectDesc(ctx, txn, tc.settings, tn, flags) + obj, err := phyAccessor.GetObjectDesc(ctx, txn, tc.settings, tc.codec(), tn, flags) if obj == nil { return nil, err } @@ -263,7 +263,7 @@ func (tc *TableCollection) resolveSchemaID( } // Next, try lookup the result from KV, storing and returning the value. 
- exists, schemaID, err := resolveSchemaID(ctx, txn, dbID, schemaName) + exists, schemaID, err := resolveSchemaID(ctx, txn, tc.codec(), dbID, schemaName) if err != nil || !exists { return exists, schemaID, err } @@ -295,7 +295,7 @@ func (tc *TableCollection) getTableVersion( readTableFromStore := func() (*sqlbase.ImmutableTableDescriptor, error) { phyAccessor := UncachedPhysicalAccessor{} - obj, err := phyAccessor.GetObjectDesc(ctx, txn, tc.settings, tn, flags) + obj, err := phyAccessor.GetObjectDesc(ctx, txn, tc.settings, tc.codec(), tn, flags) if obj == nil { return nil, err } @@ -410,7 +410,7 @@ func (tc *TableCollection) getTableVersionByID( log.VEventf(ctx, 2, "planner getting table on table ID %d", tableID) if flags.AvoidCached || testDisableTableLeases { - table, err := sqlbase.GetTableDescFromID(ctx, txn, tableID) + table, err := sqlbase.GetTableDescFromID(ctx, txn, tc.codec(), tableID) if err != nil { return nil, err } @@ -479,7 +479,7 @@ func (tc *TableCollection) getMutableTableVersionByID( log.VEventf(ctx, 2, "found uncommitted table %d", tableID) return table, nil } - return sqlbase.GetMutableTableDescFromID(ctx, txn, tableID) + return sqlbase.GetMutableTableDescFromID(ctx, txn, tc.codec(), tableID) } // releaseTableLeases releases the leases for the tables with ids in @@ -731,7 +731,7 @@ func (tc *TableCollection) getAllDescriptors( ctx context.Context, txn *kv.Txn, ) ([]sqlbase.DescriptorProto, error) { if tc.allDescriptors == nil { - descs, err := GetAllDescriptors(ctx, txn) + descs, err := GetAllDescriptors(ctx, txn, tc.codec()) if err != nil { return nil, err } @@ -748,11 +748,11 @@ func (tc *TableCollection) getAllDatabaseDescriptors( ctx context.Context, txn *kv.Txn, ) ([]*sqlbase.DatabaseDescriptor, error) { if tc.allDatabaseDescriptors == nil { - dbDescIDs, err := GetAllDatabaseDescriptorIDs(ctx, txn) + dbDescIDs, err := GetAllDatabaseDescriptorIDs(ctx, txn, tc.codec()) if err != nil { return nil, err } - dbDescs, err := getDatabaseDescriptorsFromIDs(ctx, txn, dbDescIDs) + dbDescs, err := getDatabaseDescriptorsFromIDs(ctx, txn, tc.codec(), dbDescIDs) if err != nil { return nil, err } @@ -766,11 +766,11 @@ func (tc *TableCollection) getAllDatabaseDescriptors( // database. It attempts to perform this operation in a single request, // rather than making a round trip for each ID. func getDatabaseDescriptorsFromIDs( - ctx context.Context, txn *kv.Txn, ids []sqlbase.ID, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, ids []sqlbase.ID, ) ([]*sqlbase.DatabaseDescriptor, error) { b := txn.NewBatch() for _, id := range ids { - key := sqlbase.MakeDescMetadataKey(id) + key := sqlbase.MakeDescMetadataKey(codec, id) b.Get(key) } if err := txn.Run(ctx, b); err != nil { @@ -816,7 +816,7 @@ func (tc *TableCollection) getSchemasForDatabase( } if _, ok := tc.allSchemasForDatabase[dbID]; !ok { var err error - tc.allSchemasForDatabase[dbID], err = schema.GetForDatabase(ctx, txn, dbID) + tc.allSchemasForDatabase[dbID], err = schema.GetForDatabase(ctx, txn, tc.codec(), dbID) if err != nil { return nil, err } @@ -866,6 +866,10 @@ func (tc *TableCollection) validatePrimaryKeys() error { return nil } +func (tc *TableCollection) codec() keys.SQLCodec { + return tc.leaseMgr.codec +} + // MigrationSchemaChangeRequiredContext flags a schema change as necessary to // run even in a mixed-version 19.2/20.1 state where schema changes are normally // banned, because the schema change is being run in a startup migration. 
It's @@ -1129,5 +1133,13 @@ func (p *planner) writeTableDescToBatch( return err } - return writeDescToBatch(ctx, p.extendedEvalCtx.Tracing.KVTracingEnabled(), p.execCfg.Settings, b, tableDesc.GetID(), tableDesc.TableDesc()) + return writeDescToBatch( + ctx, + p.extendedEvalCtx.Tracing.KVTracingEnabled(), + p.ExecCfg().Settings, + b, + p.ExecCfg().Codec, + tableDesc.GetID(), + tableDesc.TableDesc(), + ) } diff --git a/pkg/sql/table_ref_test.go b/pkg/sql/table_ref_test.go index d895f1c2b4b1..8a2ab1dd96f1 100644 --- a/pkg/sql/table_ref_test.go +++ b/pkg/sql/table_ref_test.go @@ -15,6 +15,7 @@ import ( "fmt" "testing" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -41,7 +42,7 @@ CREATE INDEX bc ON test.t(b, c); } // Retrieve the numeric descriptors. - tableDesc := sqlbase.GetTableDescriptor(kvDB, "test", "t") + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") tID := tableDesc.ID var aID, bID, cID sqlbase.ColumnID for i := range tableDesc.Columns { @@ -59,7 +60,7 @@ CREATE INDEX bc ON test.t(b, c); secID := tableDesc.Indexes[0].ID // Retrieve the numeric descriptors. - tableDesc = sqlbase.GetTableDescriptor(kvDB, "test", "hidden") + tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "hidden") tIDHidden := tableDesc.ID var rowIDHidden sqlbase.ColumnID for i := range tableDesc.Columns { diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go index ebd84f90428b..59882d34610e 100644 --- a/pkg/sql/temporary_schema.go +++ b/pkg/sql/temporary_schema.go @@ -17,6 +17,7 @@ import ( "strings" "time" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -86,7 +87,7 @@ func createTempSchema(params runParams, sKey sqlbase.DescriptorKey) (sqlbase.ID, if err != nil { return sqlbase.InvalidID, err } - if err := params.p.createSchemaWithID(params.ctx, sKey.Key(), id); err != nil { + if err := params.p.createSchemaWithID(params.ctx, sKey.Key(params.ExecCfg().Codec), id); err != nil { return sqlbase.InvalidID, err } @@ -137,9 +138,9 @@ func temporarySchemaSessionID(scName string) (bool, ClusterWideID, error) { // getTemporaryObjectNames returns all the temporary objects under the // temporary schema of the given dbID. func getTemporaryObjectNames( - ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, tempSchemaName string, + ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, dbID sqlbase.ID, tempSchemaName string, ) (TableNames, error) { - dbDesc, err := MustGetDatabaseDescByID(ctx, txn, dbID) + dbDesc, err := MustGetDatabaseDescByID(ctx, txn, codec, dbID) if err != nil { return nil, err } @@ -147,6 +148,7 @@ func getTemporaryObjectNames( return a.GetObjectNames( ctx, txn, + codec, dbDesc, tempSchemaName, tree.DatabaseListFlags{CommonLookupFlags: tree.CommonLookupFlags{Required: false}}, @@ -159,6 +161,7 @@ func cleanupSessionTempObjects( ctx context.Context, settings *cluster.Settings, db *kv.DB, + codec keys.SQLCodec, ie sqlutil.InternalExecutor, sessionID ClusterWideID, ) error { @@ -166,7 +169,7 @@ func cleanupSessionTempObjects( return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // We are going to read all database descriptor IDs, then for each database // we will drop all the objects under the temporary schema. 
- dbIDs, err := GetAllDatabaseDescriptorIDs(ctx, txn) + dbIDs, err := GetAllDatabaseDescriptorIDs(ctx, txn, codec) if err != nil { return err } @@ -175,6 +178,7 @@ func cleanupSessionTempObjects( ctx, settings, txn, + codec, ie, id, tempSchemaName, @@ -185,7 +189,7 @@ func cleanupSessionTempObjects( // itself may still exist (eg. a temporary table was created and then // dropped). So we remove the namespace table entry of the temporary // schema. - if err := sqlbase.RemoveSchemaNamespaceEntry(ctx, txn, id, tempSchemaName); err != nil { + if err := sqlbase.RemoveSchemaNamespaceEntry(ctx, txn, codec, id, tempSchemaName); err != nil { return err } } @@ -198,11 +202,12 @@ func cleanupSchemaObjects( ctx context.Context, settings *cluster.Settings, txn *kv.Txn, + codec keys.SQLCodec, ie sqlutil.InternalExecutor, dbID sqlbase.ID, schemaName string, ) error { - tbNames, err := getTemporaryObjectNames(ctx, txn, dbID, schemaName) + tbNames, err := getTemporaryObjectNames(ctx, txn, codec, dbID, schemaName) if err != nil { return err } @@ -226,6 +231,7 @@ func cleanupSchemaObjects( ctx, txn, settings, + codec, &tbName, tree.ObjectLookupFlagsWithRequired(), ) @@ -273,17 +279,18 @@ func cleanupSchemaObjects( if _, ok := descsByID[d.ID]; ok { continue } - dTableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, d.ID) + dTableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, codec, d.ID) if err != nil { return err } - db, err := sqlbase.GetDatabaseDescFromID(ctx, txn, dTableDesc.GetParentID()) + db, err := sqlbase.GetDatabaseDescFromID(ctx, txn, codec, dTableDesc.GetParentID()) if err != nil { return err } schema, err := schema.ResolveNameByID( ctx, txn, + codec, dTableDesc.GetParentID(), dTableDesc.GetParentSchemaID(), ) @@ -362,6 +369,7 @@ type isMeta1LeaseholderFunc func(hlc.Timestamp) (bool, error) type TemporaryObjectCleaner struct { settings *cluster.Settings db *kv.DB + codec keys.SQLCodec makeSessionBoundInternalExecutor sqlutil.SessionBoundInternalExecutorFactory // statusServer gives access to the Status service. 
statusServer serverpb.OptionalStatusServer @@ -388,6 +396,7 @@ func (m *temporaryObjectCleanerMetrics) MetricStruct() {} func NewTemporaryObjectCleaner( settings *cluster.Settings, db *kv.DB, + codec keys.SQLCodec, registry *metric.Registry, makeSessionBoundInternalExecutor sqlutil.SessionBoundInternalExecutorFactory, statusServer serverpb.OptionalStatusServer, @@ -399,6 +408,7 @@ func NewTemporaryObjectCleaner( return &TemporaryObjectCleaner{ settings: settings, db: db, + codec: codec, makeSessionBoundInternalExecutor: makeSessionBoundInternalExecutor, statusServer: statusServer, isMeta1LeaseholderFunc: isMeta1LeaseholderFunc, @@ -464,7 +474,7 @@ func (c *TemporaryObjectCleaner) doTemporaryObjectCleanup( var dbIDs []sqlbase.ID if err := retryFunc(ctx, func() error { var err error - dbIDs, err = GetAllDatabaseDescriptorIDs(ctx, txn) + dbIDs, err = GetAllDatabaseDescriptorIDs(ctx, txn, c.codec) return err }); err != nil { return err @@ -475,7 +485,7 @@ func (c *TemporaryObjectCleaner) doTemporaryObjectCleanup( var schemaNames map[sqlbase.ID]string if err := retryFunc(ctx, func() error { var err error - schemaNames, err = schema.GetForDatabase(ctx, txn, dbID) + schemaNames, err = schema.GetForDatabase(ctx, txn, c.codec, dbID) return err }); err != nil { return err @@ -535,6 +545,7 @@ func (c *TemporaryObjectCleaner) doTemporaryObjectCleanup( ctx, c.settings, c.db, + c.codec, ie, sessionID, ) diff --git a/pkg/sql/temporary_schema_test.go b/pkg/sql/temporary_schema_test.go index 675a8c197883..f2a993536b54 100644 --- a/pkg/sql/temporary_schema_test.go +++ b/pkg/sql/temporary_schema_test.go @@ -95,10 +95,12 @@ INSERT INTO perm_table VALUES (DEFAULT, 1); require.NoError( t, kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + execCfg := s.ExecutorConfig().(ExecutorConfig) err = cleanupSchemaObjects( ctx, - s.ExecutorConfig().(ExecutorConfig).Settings, + execCfg.Settings, txn, + execCfg.Codec, s.InternalExecutor().(*InternalExecutor), namesToID["defaultdb"], schemaName, diff --git a/pkg/sql/tests/hash_sharded_test.go b/pkg/sql/tests/hash_sharded_test.go index c4446ea1cc8f..08e6a8287854 100644 --- a/pkg/sql/tests/hash_sharded_test.go +++ b/pkg/sql/tests/hash_sharded_test.go @@ -15,6 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -102,7 +103,7 @@ func TestBasicHashShardedIndexes(t *testing.T) { if _, err := db.Exec(`CREATE INDEX foo ON kv_primary (v)`); err != nil { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, `d`, `kv_primary`) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, `d`, `kv_primary`) verifyTableDescriptorState(t, tableDesc, "primary" /* shardedIndexName */) shardColID := getShardColumnID(t, tableDesc, "primary" /* shardedIndexName */) @@ -135,7 +136,7 @@ func TestBasicHashShardedIndexes(t *testing.T) { t.Fatal(err) } - tableDesc := sqlbase.GetTableDescriptor(kvDB, `d`, `kv_secondary`) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, `d`, `kv_secondary`) verifyTableDescriptorState(t, tableDesc, "sharded_secondary" /* shardedIndexName */) }) @@ -152,7 +153,7 @@ func TestBasicHashShardedIndexes(t *testing.T) { if _, err := db.Exec(`CREATE INDEX sharded_secondary2 ON kv_secondary2 (k) USING HASH WITH BUCKET_COUNT = 12`); err != nil { t.Fatal(err) } - tableDesc := 
sqlbase.GetTableDescriptor(kvDB, `d`, `kv_secondary2`) + tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, `d`, `kv_secondary2`) verifyTableDescriptorState(t, tableDesc, "sharded_secondary2" /* shardedIndexName */) }) } diff --git a/pkg/sql/tests/system_table_test.go b/pkg/sql/tests/system_table_test.go index eabaeefeb127..26f061803adf 100644 --- a/pkg/sql/tests/system_table_test.go +++ b/pkg/sql/tests/system_table_test.go @@ -33,7 +33,7 @@ func TestInitialKeys(t *testing.T) { const keysPerDesc = 2 const nonDescKeys = 9 - ms := sqlbase.MakeMetadataSchema(zonepb.DefaultZoneConfigRef(), zonepb.DefaultSystemZoneConfigRef()) + ms := sqlbase.MakeMetadataSchema(keys.SystemSQLCodec, zonepb.DefaultZoneConfigRef(), zonepb.DefaultSystemZoneConfigRef()) kv, _ /* splits */ := ms.GetInitialValues(clusterversion.TestingClusterVersion) expected := nonDescKeys + keysPerDesc*ms.SystemDescriptorCount() if actual := len(kv); actual != expected { diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go index 0b53783d029f..0e0adf07e9e7 100644 --- a/pkg/sql/truncate.go +++ b/pkg/sql/truncate.go @@ -196,13 +196,13 @@ func (p *planner) truncateTable( tableDesc.ParentID, tableDesc.GetParentSchemaID(), tableDesc.GetName(), - ).Key() + ).Key(p.ExecCfg().Codec) key := sqlbase.MakeObjectNameKey( ctx, p.ExecCfg().Settings, newTableDesc.ParentID, newTableDesc.GetParentSchemaID(), newTableDesc.Name, - ).Key() + ).Key(p.ExecCfg().Codec) b := &kv.Batch{} // Use CPut because we want to remove a specific name -> id map. diff --git a/pkg/sql/unsplit.go b/pkg/sql/unsplit.go index 627a60229d52..42fd22534506 100644 --- a/pkg/sql/unsplit.go +++ b/pkg/sql/unsplit.go @@ -94,7 +94,9 @@ func (n *unsplitAllNode) startExec(params runParams) error { WHERE database_name=$1 AND table_name=$2 AND index_name=$3 AND split_enforced_until IS NOT NULL ` - dbDesc, err := sqlbase.GetDatabaseDescFromID(params.ctx, params.p.txn, n.tableDesc.ParentID) + dbDesc, err := sqlbase.GetDatabaseDescFromID( + params.ctx, params.p.txn, params.ExecCfg().Codec, n.tableDesc.ParentID, + ) if err != nil { return err } diff --git a/pkg/sql/vars.go b/pkg/sql/vars.go index 5b5f8e1eb4f1..643e9f47e42e 100644 --- a/pkg/sql/vars.go +++ b/pkg/sql/vars.go @@ -216,8 +216,9 @@ var varGen = map[string]sessionVar{ if len(dbName) != 0 { // Verify database descriptor exists. 
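For illustration (mirroring the MakeMetadataSchema call in the test above; not part of the patch): bootstrapping the metadata schema for a particular codec. The import paths for clusterversion and zonepb are assumed to match the rest of the tree.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/clusterversion"
	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
)

func main() {
	// Build the bootstrap schema for the system tenant; a secondary tenant
	// would pass its own codec so the initial KVs land under its prefix.
	ms := sqlbase.MakeMetadataSchema(
		keys.SystemSQLCodec,
		zonepb.DefaultZoneConfigRef(),
		zonepb.DefaultSystemZoneConfigRef(),
	)
	initialKVs, splits := ms.GetInitialValues(clusterversion.TestingClusterVersion)
	fmt.Printf("bootstrap kvs: %d, split points: %d\n", len(initialKVs), len(splits))
}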
- if _, err := evalCtx.schemaAccessors.logical.GetDatabaseDesc(ctx, evalCtx.Txn, - dbName, tree.DatabaseLookupFlags{Required: true}); err != nil { + if _, err := evalCtx.schemaAccessors.logical.GetDatabaseDesc( + ctx, evalCtx.Txn, evalCtx.Codec, dbName, tree.DatabaseLookupFlags{Required: true}, + ); err != nil { return "", err } } diff --git a/pkg/sql/virtual_schema.go b/pkg/sql/virtual_schema.go index dcc5cf05eab2..280c9daa0769 100644 --- a/pkg/sql/virtual_schema.go +++ b/pkg/sql/virtual_schema.go @@ -322,7 +322,7 @@ func (e virtualDefEntry) getPlanInfo( var dbDesc *DatabaseDescriptor if dbName != "" { var err error - dbDesc, err = p.LogicalSchemaAccessor().GetDatabaseDesc(ctx, p.txn, + dbDesc, err = p.LogicalSchemaAccessor().GetDatabaseDesc(ctx, p.txn, p.ExecCfg().Codec, dbName, tree.DatabaseLookupFlags{Required: true, AvoidCached: p.avoidCachedDescriptors}) if err != nil { return nil, err diff --git a/pkg/sql/zone_config.go b/pkg/sql/zone_config.go index b3e214a54f4f..aebc3c5cc0ef 100644 --- a/pkg/sql/zone_config.go +++ b/pkg/sql/zone_config.go @@ -34,6 +34,12 @@ func init() { var errNoZoneConfigApplies = errors.New("no zone config applies") +// TODO(nvanbenschoten): determine how zone configurations fit into a +// multi-tenant cluster. Does each tenant have its own Zones table? Does KV have +// to make sure to look at the correct Zones according to the tenant prefix of +// its key range? See #48375. +var zoneConfigCodec = keys.TODOSQLCodec + // getZoneConfig recursively looks up entries in system.zones until an // entry that applies to the object with the specified id is // found. Returns the ID of the matching zone, its zone config, and an @@ -72,7 +78,7 @@ func getZoneConfig( // No zone config for this ID. We need to figure out if it's a table, so we // look up its descriptor. - if descVal, err := getKey(sqlbase.MakeDescMetadataKey(sqlbase.ID(id))); err != nil { + if descVal, err := getKey(sqlbase.MakeDescMetadataKey(zoneConfigCodec, sqlbase.ID(id))); err != nil { return 0, nil, 0, nil, err } else if descVal != nil { var desc sqlbase.Descriptor @@ -116,7 +122,7 @@ func completeZoneConfig( } // Check to see if its a table. If so, inherit from the database. // For all other cases, inherit from the default. - if descVal, err := getKey(sqlbase.MakeDescMetadataKey(sqlbase.ID(id))); err != nil { + if descVal, err := getKey(sqlbase.MakeDescMetadataKey(zoneConfigCodec, sqlbase.ID(id))); err != nil { return err } else if descVal != nil { var desc sqlbase.Descriptor @@ -263,7 +269,7 @@ func resolveZone(ctx context.Context, txn *kv.Txn, zs *tree.ZoneSpecifier) (sqlb errMissingKey := errors.New("missing key") id, err := zonepb.ResolveZoneSpecifier(zs, func(parentID uint32, name string) (uint32, error) { - found, id, err := sqlbase.LookupPublicTableID(ctx, txn, sqlbase.ID(parentID), name) + found, id, err := sqlbase.LookupPublicTableID(ctx, txn, zoneConfigCodec, sqlbase.ID(parentID), name) if err != nil { return 0, err } diff --git a/pkg/sql/zone_config_test.go b/pkg/sql/zone_config_test.go index a6a7989c81f7..d81159cf522c 100644 --- a/pkg/sql/zone_config_test.go +++ b/pkg/sql/zone_config_test.go @@ -33,7 +33,7 @@ import ( ) var configID = sqlbase.ID(1) -var configDescKey = sqlbase.MakeDescMetadataKey(keys.MaxReservedDescID) +var configDescKey = sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, keys.MaxReservedDescID) // forceNewConfig forces a system config update by writing a bogus descriptor with an // incremented value inside. 
It then repeatedly fetches the gossip config until the diff --git a/pkg/sqlmigrations/migrations.go b/pkg/sqlmigrations/migrations.go index 183f0515e7f6..090a28a832e7 100644 --- a/pkg/sqlmigrations/migrations.go +++ b/pkg/sqlmigrations/migrations.go @@ -314,18 +314,22 @@ var backwardCompatibleMigrations = []migrationDescriptor{ }, } -func staticIDs(ids ...sqlbase.ID) func(ctx context.Context, db db) ([]sqlbase.ID, error) { - return func(ctx context.Context, db db) ([]sqlbase.ID, error) { return ids, nil } +func staticIDs( + ids ...sqlbase.ID, +) func(ctx context.Context, db db, codec keys.SQLCodec) ([]sqlbase.ID, error) { + return func(ctx context.Context, db db, codec keys.SQLCodec) ([]sqlbase.ID, error) { return ids, nil } } -func databaseIDs(names ...string) func(ctx context.Context, db db) ([]sqlbase.ID, error) { - return func(ctx context.Context, db db) ([]sqlbase.ID, error) { +func databaseIDs( + names ...string, +) func(ctx context.Context, db db, codec keys.SQLCodec) ([]sqlbase.ID, error) { + return func(ctx context.Context, db db, codec keys.SQLCodec) ([]sqlbase.ID, error) { var ids []sqlbase.ID for _, name := range names { // This runs as part of an older migration (introduced in 2.1). We use // the DeprecatedDatabaseKey, and let the 20.1 migration handle moving // from the old namespace table into the new one. - kv, err := db.Get(ctx, sqlbase.NewDeprecatedDatabaseKey(name).Key()) + kv, err := db.Get(ctx, sqlbase.NewDeprecatedDatabaseKey(name).Key(codec)) if err != nil { return nil, err } @@ -365,7 +369,7 @@ type migrationDescriptor struct { // descriptors that were added by this migration. This is needed to automate // certain tests, which check the number of ranges/descriptors present on // server bootup. - newDescriptorIDs func(ctx context.Context, db db) ([]sqlbase.ID, error) + newDescriptorIDs func(ctx context.Context, db db, codec keys.SQLCodec) ([]sqlbase.ID, error) } func init() { @@ -382,6 +386,7 @@ func init() { type runner struct { db db + codec keys.SQLCodec sqlExecutor *sql.InternalExecutor settings *cluster.Settings } @@ -436,6 +441,7 @@ type Manager struct { stopper *stop.Stopper leaseManager leaseManager db db + codec keys.SQLCodec sqlExecutor *sql.InternalExecutor testingKnobs MigrationManagerTestingKnobs settings *cluster.Settings @@ -446,6 +452,7 @@ type Manager struct { func NewManager( stopper *stop.Stopper, db *kv.DB, + codec keys.SQLCodec, executor *sql.InternalExecutor, clock *hlc.Clock, testingKnobs MigrationManagerTestingKnobs, @@ -461,6 +468,7 @@ func NewManager( stopper: stopper, leaseManager: leasemanager.New(db, clock, opts), db: db, + codec: codec, sqlExecutor: executor, testingKnobs: testingKnobs, settings: settings, @@ -478,6 +486,7 @@ func NewManager( func ExpectedDescriptorIDs( ctx context.Context, db db, + codec keys.SQLCodec, defaultZoneConfig *zonepb.ZoneConfig, defaultSystemZoneConfig *zonepb.ZoneConfig, ) (sqlbase.IDs, error) { @@ -485,7 +494,7 @@ func ExpectedDescriptorIDs( if err != nil { return nil, err } - descriptorIDs := sqlbase.MakeMetadataSchema(defaultZoneConfig, defaultSystemZoneConfig).DescriptorIDs() + descriptorIDs := sqlbase.MakeMetadataSchema(codec, defaultZoneConfig, defaultSystemZoneConfig).DescriptorIDs() for _, migration := range backwardCompatibleMigrations { // Is the migration not creating descriptors? 
if migration.newDescriptorIDs == nil || @@ -494,7 +503,7 @@ func ExpectedDescriptorIDs( continue } if _, ok := completedMigrations[string(migrationKey(migration))]; ok { - newIDs, err := migration.newDescriptorIDs(ctx, db) + newIDs, err := migration.newDescriptorIDs(ctx, db, codec) if err != nil { return nil, err } @@ -607,6 +616,7 @@ func (m *Manager) EnsureMigrations(ctx context.Context, bootstrapVersion roachpb startTime := timeutil.Now().String() r := runner{ db: m.db, + codec: m.codec, sqlExecutor: m.sqlExecutor, settings: m.settings, } @@ -695,6 +705,7 @@ func (m *Manager) StartSchemaChangeJobMigration(ctx context.Context) error { log.Infof(ctx, "starting schema change job migration") r := runner{ db: m.db, + codec: m.codec, sqlExecutor: m.sqlExecutor, settings: m.settings, } @@ -789,7 +800,7 @@ func migrateSchemaChangeJobs(ctx context.Context, r runner, registry *jobs.Regis // DroppedTables is always populated in 19.2 for all jobs that drop // tables. if len(details.DroppedTables) > 0 { - return migrateDropTablesOrDatabaseJob(ctx, txn, registry, job) + return migrateDropTablesOrDatabaseJob(ctx, txn, r.codec, registry, job) } descIDs := job.Payload().DescriptorIDs @@ -800,7 +811,7 @@ func migrateSchemaChangeJobs(ctx context.Context, r runner, registry *jobs.Regis "job %d: could not be migrated due to unexpected descriptor IDs %v", *job.ID(), descIDs) } descID := descIDs[0] - tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, descID) + tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, r.codec, descID) if err != nil { return err } @@ -836,7 +847,7 @@ func migrateSchemaChangeJobs(ctx context.Context, r runner, registry *jobs.Regis schemaChangeJobsForDesc := make(map[sqlbase.ID][]int64) gcJobsForDesc := make(map[sqlbase.ID][]int64) if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - descs, err := sql.GetAllDescriptors(ctx, txn) + descs, err := sql.GetAllDescriptors(ctx, txn, r.codec) if err != nil { return err } @@ -1119,7 +1130,7 @@ func migrateMutationJobForTable( // dropping a table, including dropping tables, views, sequences, and // databases, as well as truncating tables. 
 func migrateDropTablesOrDatabaseJob(
-	ctx context.Context, txn *kv.Txn, registry *jobs.Registry, job *jobs.Job,
+	ctx context.Context, txn *kv.Txn, codec keys.SQLCodec, registry *jobs.Registry, job *jobs.Job,
 ) error {
 	payload := job.Payload()
 	details := payload.GetSchemaChange()
@@ -1170,7 +1181,7 @@ func migrateDropTablesOrDatabaseJob(
 	for i := range details.DroppedTables {
 		tableID := details.DroppedTables[i].ID
 		tablesToDrop[i].ID = details.DroppedTables[i].ID
-		desc, err := sqlbase.GetTableDescFromID(ctx, txn, tableID)
+		desc, err := sqlbase.GetTableDescFromID(ctx, txn, codec, tableID)
 		if err != nil {
 			return err
 		}
@@ -1225,8 +1236,8 @@ func createSystemTable(ctx context.Context, r runner, desc sqlbase.TableDescript
 	err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
 		b := txn.NewBatch()
 		tKey := sqlbase.MakePublicTableNameKey(ctx, r.settings, desc.GetParentID(), desc.GetName())
-		b.CPut(tKey.Key(), desc.GetID(), nil)
-		b.CPut(sqlbase.MakeDescMetadataKey(desc.GetID()), sqlbase.WrapDescriptor(&desc), nil)
+		b.CPut(tKey.Key(r.codec), desc.GetID(), nil)
+		b.CPut(sqlbase.MakeDescMetadataKey(r.codec, desc.GetID()), sqlbase.WrapDescriptor(&desc), nil)
 		if err := txn.SetSystemConfigTrigger(); err != nil {
 			return err
 		}
@@ -1280,7 +1291,6 @@ func createProtectedTimestampsRecordsTable(ctx context.Context, r runner) error
 }
 
 func createNewSystemNamespaceDescriptor(ctx context.Context, r runner) error {
-
 	return r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
 		b := txn.NewBatch()
 
@@ -1288,7 +1298,7 @@ func createNewSystemNamespaceDescriptor(ctx context.Context, r runner) error {
 		// "namespace". This corrects the behavior of this migration as it existed
 		// in 20.1 betas. The old namespace table cannot be edited without breaking
 		// explicit selects from system.namespace in 19.2.
-		deprecatedKey := sqlbase.MakeDescMetadataKey(keys.DeprecatedNamespaceTableID)
+		deprecatedKey := sqlbase.MakeDescMetadataKey(r.codec, keys.DeprecatedNamespaceTableID)
 		deprecatedDesc := &sqlbase.Descriptor{}
 		ts, err := txn.GetProtoTs(ctx, deprecatedKey, deprecatedDesc)
 		if err != nil {
@@ -1310,9 +1320,9 @@ func createNewSystemNamespaceDescriptor(ctx context.Context, r runner) error {
 		// copied over.
 		nameKey := sqlbase.NewPublicTableKey(
 			sqlbase.NamespaceTable.GetParentID(), sqlbase.NamespaceTableName)
-		b.Put(nameKey.Key(), sqlbase.NamespaceTable.GetID())
+		b.Put(nameKey.Key(r.codec), sqlbase.NamespaceTable.GetID())
 		b.Put(sqlbase.MakeDescMetadataKey(
-			sqlbase.NamespaceTable.GetID()), sqlbase.WrapDescriptor(&sqlbase.NamespaceTable))
+			r.codec, sqlbase.NamespaceTable.GetID()), sqlbase.WrapDescriptor(&sqlbase.NamespaceTable))
 		return txn.Run(ctx, b)
 	})
 }
@@ -1380,12 +1390,12 @@ func migrateSystemNamespace(ctx context.Context, r runner) error {
 		if parentID == keys.RootNamespaceID {
 			// This row represents a database. Add it to the new namespace table.
 			databaseKey := sqlbase.NewDatabaseKey(name)
-			if err := r.db.Put(ctx, databaseKey.Key(), id); err != nil {
+			if err := r.db.Put(ctx, databaseKey.Key(r.codec), id); err != nil {
 				return err
 			}
 			// Also create a 'public' schema for this database.
 			schemaKey := sqlbase.NewSchemaKey(id, "public")
-			if err := r.db.Put(ctx, schemaKey.Key(), keys.PublicSchemaID); err != nil {
+			if err := r.db.Put(ctx, schemaKey.Key(r.codec), keys.PublicSchemaID); err != nil {
 				return err
 			}
 		} else {
@@ -1398,7 +1408,7 @@ func migrateSystemNamespace(ctx context.Context, r runner) error {
 				continue
 			}
 			tableKey := sqlbase.NewTableKey(parentID, keys.PublicSchemaID, name)
-			if err := r.db.Put(ctx, tableKey.Key(), id); err != nil {
+			if err := r.db.Put(ctx, tableKey.Key(r.codec), id); err != nil {
 				return err
 			}
 		}
diff --git a/pkg/sqlmigrations/migrations_test.go b/pkg/sqlmigrations/migrations_test.go
index 613e644315ab..bf352fcfefba 100644
--- a/pkg/sqlmigrations/migrations_test.go
+++ b/pkg/sqlmigrations/migrations_test.go
@@ -469,6 +469,7 @@ func (mt *migrationTest) runMigration(ctx context.Context, m migrationDescriptor
 	}
 	return m.workFn(ctx, runner{
 		settings:    mt.server.ClusterSettings(),
+		codec:       keys.SystemSQLCodec,
 		db:          mt.kvDB,
 		sqlExecutor: mt.server.InternalExecutor().(*sql.InternalExecutor),
 	})
@@ -501,8 +502,8 @@ func TestCreateSystemTable(t *testing.T) {
 	sqlbase.SystemAllowedPrivileges[table.ID] = sqlbase.SystemAllowedPrivileges[keys.NamespaceTableID]
 	table.Name = "dummy"
 
-	nameKey := sqlbase.NewPublicTableKey(table.ParentID, table.Name).Key()
-	descKey := sqlbase.MakeDescMetadataKey(table.ID)
+	nameKey := sqlbase.NewPublicTableKey(table.ParentID, table.Name).Key(keys.SystemSQLCodec)
+	descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, table.ID)
 	descVal := sqlbase.WrapDescriptor(&table)
 
 	mt := makeMigrationTest(ctx, t)
@@ -720,10 +721,10 @@ func TestMigrateNamespaceTableDescriptors(t *testing.T) {
 
 	// Since we're already on 20.1, mimic the beginning state by deleting the
 	// new namespace descriptor.
-	key := sqlbase.MakeDescMetadataKey(keys.NamespaceTableID)
+	key := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, keys.NamespaceTableID)
 	require.NoError(t, mt.kvDB.Del(ctx, key))
 
-	deprecatedKey := sqlbase.MakeDescMetadataKey(keys.DeprecatedNamespaceTableID)
+	deprecatedKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, keys.DeprecatedNamespaceTableID)
 	desc := &sqlbase.Descriptor{}
 	require.NoError(t, mt.kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
 		_, err := txn.GetProtoTs(ctx, deprecatedKey, desc)
diff --git a/pkg/testutils/localtestcluster/local_test_cluster.go b/pkg/testutils/localtestcluster/local_test_cluster.go
index c00bbdb73241..0f3ba87c0c93 100644
--- a/pkg/testutils/localtestcluster/local_test_cluster.go
+++ b/pkg/testutils/localtestcluster/local_test_cluster.go
@@ -21,6 +21,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/config"
 	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
 	"github.com/cockroachdb/cockroach/pkg/gossip"
+	"github.com/cockroachdb/cockroach/pkg/keys"
 	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/tscache"
@@ -187,7 +188,9 @@ func (ltc *LocalTestCluster) Start(t testing.TB, baseCtx *base.Config, initFacto
 	var initialValues []roachpb.KeyValue
 	var splits []roachpb.RKey
 	if !ltc.DontCreateSystemRanges {
-		schema := sqlbase.MakeMetadataSchema(cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig)
+		schema := sqlbase.MakeMetadataSchema(
+			keys.SystemSQLCodec, cfg.DefaultZoneConfig, cfg.DefaultSystemZoneConfig,
+		)
 		var tableSplits []roachpb.RKey
 		bootstrapVersion := clusterversion.TestingClusterVersion
 		initialValues, tableSplits = schema.GetInitialValues(bootstrapVersion)
diff --git a/pkg/testutils/testcluster/testcluster_test.go b/pkg/testutils/testcluster/testcluster_test.go
index 9c2183edf8e1..84efcfc5e7c3 100644
--- a/pkg/testutils/testcluster/testcluster_test.go
+++ b/pkg/testutils/testcluster/testcluster_test.go
@@ -58,7 +58,7 @@ func TestManualReplication(t *testing.T) {
 
 	// Split the table to a new range.
 	kvDB := tc.Servers[0].DB()
-	tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test")
+	tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test")
 	tableStartKey := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID))
 	leftRangeDesc, tableRangeDesc, err := tc.SplitRange(tableStartKey)