diff --git a/pkg/ccl/backupccl/backup_job.go b/pkg/ccl/backupccl/backup_job.go index f3a9b7312757..1c515ff7d5b5 100644 --- a/pkg/ccl/backupccl/backup_job.go +++ b/pkg/ccl/backupccl/backup_job.go @@ -15,10 +15,10 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -60,7 +60,7 @@ func countRows(raw roachpb.BulkOpSummary, pkIDs map[uint64]struct{}) RowCount { return res } -func allRangeDescriptors(ctx context.Context, txn *client.Txn) ([]roachpb.RangeDescriptor, error) { +func allRangeDescriptors(ctx context.Context, txn *kv.Txn) ([]roachpb.RangeDescriptor, error) { rows, err := txn.Scan(ctx, keys.Meta2Prefix, keys.MetaMax, 0) if err != nil { return nil, errors.Wrapf(err, @@ -161,7 +161,7 @@ type spanAndTime struct { // file. func backup( ctx context.Context, - db *client.DB, + db *kv.DB, gossip *gossip.Gossip, settings *cluster.Settings, defaultStore cloud.ExternalStorage, @@ -186,7 +186,7 @@ func backup( var checkpointMu syncutil.Mutex var ranges []roachpb.RangeDescriptor - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error // TODO(benesch): limit the range descriptors we fetch to the ranges that // are actually relevant in the backup to speed up small backups on large @@ -306,7 +306,7 @@ func backup( MVCCFilter: roachpb.MVCCFilter(backupManifest.MVCCFilter), Encryption: encryption, } - rawRes, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req) + rawRes, pErr := kv.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req) if pErr != nil { return pErr.GoError() } @@ -518,7 +518,7 @@ func (b *backupResumer) Resume( return nil } -func (b *backupResumer) clearStats(ctx context.Context, DB *client.DB) error { +func (b *backupResumer) clearStats(ctx context.Context, DB *kv.DB) error { details := b.job.Details().(jobspb.BackupDetails) var backupManifest BackupManifest if err := protoutil.Unmarshal(details.BackupManifest, &backupManifest); err != nil { @@ -530,7 +530,7 @@ func (b *backupResumer) clearStats(ctx context.Context, DB *client.DB) error { return err } details.BackupManifest = descBytes - err = DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return b.job.WithTxn(txn).SetDetails(ctx, details) }) return err diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index f82fee51e4c1..95e32100d271 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -35,10 +35,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/utilccl/sampledataccl" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" 
"github.com/cockroachdb/cockroach/pkg/roachpb" @@ -2475,7 +2475,7 @@ func TestRestoreAsOfSystemTimeGCBounds(t *testing.T) { }, Threshold: tc.Server(0).Clock().Now(), } - if _, err := client.SendWrapped( + if _, err := kv.SendWrapped( ctx, tc.Server(0).DistSenderI().(*kvcoord.DistSender), &gcr, ); err != nil { t.Fatal(err) diff --git a/pkg/ccl/backupccl/restore_job.go b/pkg/ccl/backupccl/restore_job.go index 9f66797d58be..39ed3cb7f2ac 100644 --- a/pkg/ccl/backupccl/restore_job.go +++ b/pkg/ccl/backupccl/restore_job.go @@ -19,10 +19,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/storageccl" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" @@ -291,7 +291,7 @@ rangeLoop: func splitAndScatter( restoreCtx context.Context, settings *cluster.Settings, - db *client.DB, + db *kv.DB, kr *storageccl.KeyRewriter, numClusterNodes int, importSpans []importEntry, @@ -353,7 +353,7 @@ func splitAndScatter( // span being restored into. RandomizeLeases: true, } - if _, pErr := client.SendWrapped(ctx, db.NonTransactionalSender(), scatterReq); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, db.NonTransactionalSender(), scatterReq); pErr != nil { // TODO(dan): Unfortunately, Scatter is still too unreliable to // fail the RESTORE when Scatter fails. I'm uncomfortable that // this could break entirely and not start failing the tests, @@ -397,7 +397,7 @@ func splitAndScatter( scatterReq := &roachpb.AdminScatterRequest{ RequestHeader: roachpb.RequestHeaderFromSpan(roachpb.Span{Key: newSpanKey, EndKey: newSpanKey.Next()}), } - if _, pErr := client.SendWrapped(ctx, db.NonTransactionalSender(), scatterReq); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, db.NonTransactionalSender(), scatterReq); pErr != nil { // TODO(dan): Unfortunately, Scatter is still too unreliable to // fail the RESTORE when Scatter fails. I'm uncomfortable that // this could break entirely and not start failing the tests, @@ -427,7 +427,7 @@ func splitAndScatter( // on that database at the time this function is called. func WriteTableDescs( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, databases []*sqlbase.DatabaseDescriptor, tables []*sqlbase.TableDescriptor, descCoverage tree.DescriptorCoverage, @@ -557,7 +557,7 @@ func rewriteBackupSpanKey(kr *storageccl.KeyRewriter, key roachpb.Key) (roachpb. // files. func restore( restoreCtx context.Context, - db *client.DB, + db *kv.DB, gossip *gossip.Gossip, settings *cluster.Settings, backupManifests []BackupManifest, @@ -710,7 +710,7 @@ func restore( defer tracing.FinishSpan(importSpan) defer func() { <-importsSem }() - importRes, pErr := client.SendWrapped(ctx, db.NonTransactionalSender(), importRequest) + importRes, pErr := kv.SendWrapped(ctx, db.NonTransactionalSender(), importRequest) if pErr != nil { return errors.Wrapf(pErr.GoError(), "importing span %v", importRequest.DataSpan) @@ -834,14 +834,14 @@ func remapRelevantStatistics( // after the other. 
func isDatabaseEmpty( ctx context.Context, - db *client.DB, + db *kv.DB, dbDesc *sql.DatabaseDescriptor, ignoredTables map[sqlbase.ID]struct{}, ) (bool, error) { var allDescs []sqlbase.Descriptor if err := db.Txn( ctx, - func(ctx context.Context, txn *client.Txn) error { + func(ctx context.Context, txn *kv.Txn) error { var err error allDescs, err = allSQLDescriptors(ctx, txn) return err @@ -923,7 +923,7 @@ func createImportingTables( } if !details.PrepareCompleted { - err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Write the new TableDescriptors which are set in the OFFLINE state. if err := WriteTableDescs(ctx, txn, databases, tables, details.DescriptorCoverage, r.job.Payload().Username, r.settings, nil /* extra */); err != nil { return errors.Wrapf(err, "restoring %d TableDescriptors from %d databases", len(r.tables), len(databases)) @@ -1029,7 +1029,7 @@ func (r *restoreResumer) insertStats(ctx context.Context) error { return nil } - err := r.execCfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := r.execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := stats.InsertNewStats(ctx, r.execCfg.InternalExecutor, txn, r.latestStats); err != nil { return errors.Wrapf(err, "inserting stats from backup") } @@ -1053,7 +1053,7 @@ func (r *restoreResumer) publishTables(ctx context.Context) error { } log.Event(ctx, "making tables live") - err := r.execCfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := r.execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Write the new TableDescriptors and flip state over to public so they can be // accessed. b := txn.NewBatch() @@ -1107,7 +1107,7 @@ func (r *restoreResumer) OnFailOrCancel(ctx context.Context, phs interface{}) er } // dropTables implements the OnFailOrCancel logic. -func (r *restoreResumer) dropTables(ctx context.Context, txn *client.Txn) error { +func (r *restoreResumer) dropTables(ctx context.Context, txn *kv.Txn) error { details := r.job.Details().(jobspb.RestoreDetails) // No need to mark the tables as dropped if they were not even created in the diff --git a/pkg/ccl/backupccl/restore_planning.go b/pkg/ccl/backupccl/restore_planning.go index b89f62368aef..5a272e6616b8 100644 --- a/pkg/ccl/backupccl/restore_planning.go +++ b/pkg/ccl/backupccl/restore_planning.go @@ -14,10 +14,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/storageccl" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/covering" @@ -219,7 +219,7 @@ func allocateTableRewrites( // Fail fast if the necessary databases don't exist or are otherwise // incompatible with this restore. - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { maxExpectedDB := keys.MinUserDescID + sql.MaxDefaultDescriptorID // Check that any DBs being restored do _not_ exist. 
for name := range restoreDBNames { diff --git a/pkg/ccl/backupccl/targets.go b/pkg/ccl/backupccl/targets.go index 5c3f7dcfdd9d..5092f9b275fa 100644 --- a/pkg/ccl/backupccl/targets.go +++ b/pkg/ccl/backupccl/targets.go @@ -13,8 +13,8 @@ import ( "sort" "github.com/cockroachdb/cockroach/pkg/ccl/storageccl" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -297,7 +297,7 @@ func descriptorsMatchingTargets( // a descriptor the the ID by which it was previously known (e.g pre-TRUNCATE). func getRelevantDescChanges( ctx context.Context, - db *client.DB, + db *kv.DB, startTime, endTime hlc.Timestamp, descs []sqlbase.Descriptor, expanded []sqlbase.ID, @@ -402,7 +402,7 @@ func getRelevantDescChanges( // nil content). func getAllDescChanges( ctx context.Context, - db *client.DB, + db *kv.DB, startTime, endTime hlc.Timestamp, priorIDs map[sqlbase.ID]sqlbase.ID, ) ([]BackupManifest_DescriptorRevision, error) { @@ -440,7 +440,7 @@ func getAllDescChanges( return res, nil } -func allSQLDescriptors(ctx context.Context, txn *client.Txn) ([]sqlbase.Descriptor, error) { +func allSQLDescriptors(ctx context.Context, txn *kv.Txn) ([]sqlbase.Descriptor, error) { startKey := roachpb.Key(keys.MakeTablePrefix(keys.DescriptorTableID)) endKey := startKey.PrefixEnd() rows, err := txn.Scan(ctx, startKey, endKey, 0) @@ -492,12 +492,12 @@ func ensureInterleavesIncluded(tables []*sqlbase.TableDescriptor) error { } func loadAllDescs( - ctx context.Context, db *client.DB, asOf hlc.Timestamp, + ctx context.Context, db *kv.DB, asOf hlc.Timestamp, ) ([]sqlbase.Descriptor, error) { var allDescs []sqlbase.Descriptor if err := db.Txn( ctx, - func(ctx context.Context, txn *client.Txn) error { + func(ctx context.Context, txn *kv.Txn) error { var err error txn.SetFixedTimestamp(ctx, asOf) allDescs, err = allSQLDescriptors(ctx, txn) @@ -592,7 +592,7 @@ func fullClusterTargets( return fullClusterDescs, fullClusterDBs, nil } -func lookupDatabaseID(ctx context.Context, txn *client.Txn, name string) (sqlbase.ID, error) { +func lookupDatabaseID(ctx context.Context, txn *kv.Txn, name string) (sqlbase.ID, error) { found, id, err := sqlbase.LookupDatabaseID(ctx, txn, name) if err != nil { return sqlbase.InvalidID, err @@ -605,9 +605,7 @@ func lookupDatabaseID(ctx context.Context, txn *client.Txn, name string) (sqlbas // CheckTableExists returns an error if a table already exists with given // parent and name. 
-func CheckTableExists( - ctx context.Context, txn *client.Txn, parentID sqlbase.ID, name string, -) error { +func CheckTableExists(ctx context.Context, txn *kv.Txn, parentID sqlbase.ID, name string) error { found, _, err := sqlbase.LookupPublicTableID(ctx, txn, parentID, name) if err != nil { return err diff --git a/pkg/ccl/changefeedccl/changefeed_dist.go b/pkg/ccl/changefeedccl/changefeed_dist.go index c267f61e3f4d..b809a98db8a7 100644 --- a/pkg/ccl/changefeedccl/changefeed_dist.go +++ b/pkg/ccl/changefeedccl/changefeed_dist.go @@ -11,8 +11,8 @@ package changefeedccl import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -89,7 +89,7 @@ func distChangefeedFlow( } // Changefeed flows handle transactional consistency themselves. - var noTxn *client.Txn + var noTxn *kv.Txn gatewayNodeID := execCfg.NodeID.Get() dsp := phs.DistSQLPlanner() evalCtx := phs.ExtendedEvalContext() @@ -195,10 +195,10 @@ func distChangefeedFlow( } func fetchSpansForTargets( - ctx context.Context, db *client.DB, targets jobspb.ChangefeedTargets, ts hlc.Timestamp, + ctx context.Context, db *kv.DB, targets jobspb.ChangefeedTargets, ts hlc.Timestamp, ) ([]roachpb.Span, error) { var spans []roachpb.Span - err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { spans = nil txn.SetFixedTimestamp(ctx, ts) // Note that all targets are currently guaranteed to be tables. diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 1c463f7edd26..eed1f65c77a8 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -28,10 +28,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/cdctest" "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server" @@ -796,7 +796,7 @@ func fetchDescVersionModificationTime( } clock := hlc.NewClock(hlc.UnixNano, time.Minute) hh := roachpb.Header{Timestamp: clock.Now()} - res, pErr := client.SendWrappedWith(context.Background(), + res, pErr := kv.SendWrappedWith(context.Background(), f.Server().DB().NonTransactionalSender(), hh, req) if pErr != nil { t.Fatal(pErr.GoError()) diff --git a/pkg/ccl/changefeedccl/kvfeed/kv_feed.go b/pkg/ccl/changefeedccl/kvfeed/kv_feed.go index d99be67fa916..a8c4411b3f48 100644 --- a/pkg/ccl/changefeedccl/kvfeed/kv_feed.go +++ b/pkg/ccl/changefeedccl/kvfeed/kv_feed.go @@ -19,8 +19,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase" "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/schemafeed" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" 
"github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -36,7 +36,7 @@ import ( // Config configures a kvfeed. type Config struct { Settings *cluster.Settings - DB *client.DB + DB *kv.DB Clock *hlc.Clock Gossip *gossip.Gossip Spans []roachpb.Span @@ -83,7 +83,7 @@ func Run(ctx context.Context, cfg Config) error { var pff physicalFeedFactory { sender := cfg.DB.NonTransactionalSender() - distSender := sender.(*client.CrossRangeTxnWrapperSender).Wrapped().(*kvcoord.DistSender) + distSender := sender.(*kv.CrossRangeTxnWrapperSender).Wrapped().(*kvcoord.DistSender) pff = rangefeedFactory(distSender.RangeFeed) } bf := func() EventBuffer { diff --git a/pkg/ccl/changefeedccl/kvfeed/scanner.go b/pkg/ccl/changefeedccl/kvfeed/scanner.go index 4f665edb4c6b..39e296b72113 100644 --- a/pkg/ccl/changefeedccl/kvfeed/scanner.go +++ b/pkg/ccl/changefeedccl/kvfeed/scanner.go @@ -14,8 +14,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -37,7 +37,7 @@ type kvScanner interface { type scanRequestScanner struct { settings *cluster.Settings gossip *gossip.Gossip - db *client.DB + db *kv.DB } var _ kvScanner = (*scanRequestScanner)(nil) @@ -143,10 +143,10 @@ func (p *scanRequestScanner) exportSpan( } func getSpansToProcess( - ctx context.Context, db *client.DB, targetSpans []roachpb.Span, + ctx context.Context, db *kv.DB, targetSpans []roachpb.Span, ) ([]roachpb.Span, error) { var ranges []roachpb.RangeDescriptor - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error ranges, err = allRangeDescriptors(ctx, txn) return err @@ -223,7 +223,7 @@ func slurpScanResponse( return nil } -func allRangeDescriptors(ctx context.Context, txn *client.Txn) ([]roachpb.RangeDescriptor, error) { +func allRangeDescriptors(ctx context.Context, txn *kv.Txn) ([]roachpb.RangeDescriptor, error) { rows, err := txn.Scan(ctx, keys.Meta2Prefix, keys.MetaMax, 0) if err != nil { return nil, err diff --git a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go index a3c85e03ad1a..57a5ba37a52f 100644 --- a/pkg/ccl/changefeedccl/schemafeed/schema_feed.go +++ b/pkg/ccl/changefeedccl/schemafeed/schema_feed.go @@ -15,9 +15,9 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/ccl/changefeedccl/changefeedbase" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" @@ -48,7 +48,7 @@ func (e TableEvent) Timestamp() hlc.Timestamp { // Config configures a SchemaFeed. type Config struct { - DB *client.DB + DB *kv.DB Clock *hlc.Clock Settings *cluster.Settings Targets jobspb.ChangefeedTargets @@ -87,7 +87,7 @@ type Config struct { // lowest timestamp where at least one table doesn't meet the invariant. 
type SchemaFeed struct { filter tableEventFilter - db *client.DB + db *kv.DB clock *hlc.Clock settings *cluster.Settings targets jobspb.ChangefeedTargets @@ -185,7 +185,7 @@ func (tf *SchemaFeed) primeInitialTableDescs(ctx context.Context) error { initialTableDescTs := tf.mu.highWater tf.mu.Unlock() var initialDescs []*sqlbase.TableDescriptor - initialTableDescsFn := func(ctx context.Context, txn *client.Txn) error { + initialTableDescsFn := func(ctx context.Context, txn *kv.Txn) error { initialDescs = initialDescs[:0] txn.SetFixedTimestamp(ctx, initialTableDescTs) // Note that all targets are currently guaranteed to be tables. @@ -446,10 +446,7 @@ func (tf *SchemaFeed) validateTable(ctx context.Context, desc *sqlbase.TableDesc } func fetchTableDescriptorVersions( - ctx context.Context, - db *client.DB, - startTS, endTS hlc.Timestamp, - targets jobspb.ChangefeedTargets, + ctx context.Context, db *kv.DB, startTS, endTS hlc.Timestamp, targets jobspb.ChangefeedTargets, ) ([]*sqlbase.TableDescriptor, error) { if log.V(2) { log.Infof(ctx, `fetching table descs (%s,%s]`, startTS, endTS) @@ -465,7 +462,7 @@ func fetchTableDescriptorVersions( ReturnSST: true, OmitChecksum: true, } - res, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req) + res, pErr := kv.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req) if log.V(2) { log.Infof(ctx, `fetched table descs (%s,%s] took %s`, startTS, endTS, timeutil.Since(start)) } diff --git a/pkg/ccl/followerreadsccl/followerreads.go b/pkg/ccl/followerreadsccl/followerreads.go index b6b2314aca7d..60393fd067cf 100644 --- a/pkg/ccl/followerreadsccl/followerreads.go +++ b/pkg/ccl/followerreadsccl/followerreads.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts" @@ -131,7 +131,7 @@ func newOracleFactory(cfg replicaoracle.Config) replicaoracle.OracleFactory { } } -func (f oracleFactory) Oracle(txn *client.Txn) replicaoracle.Oracle { +func (f oracleFactory) Oracle(txn *kv.Txn) replicaoracle.Oracle { if txn != nil && canUseFollowerRead(f.clusterID.Get(), f.st, txn.ReadTimestamp()) { return f.closest.Oracle(txn) } diff --git a/pkg/ccl/followerreadsccl/followerreads_test.go b/pkg/ccl/followerreadsccl/followerreads_test.go index 214a8bdd2e44..1b1ecc6a9cbb 100644 --- a/pkg/ccl/followerreadsccl/followerreads_test.go +++ b/pkg/ccl/followerreadsccl/followerreads_test.go @@ -15,7 +15,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -154,11 +154,11 @@ func TestOracleFactory(t *testing.T) { stopper := stop.NewStopper() defer stopper.Stop(context.TODO()) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) - c := client.NewDB(log.AmbientContext{ + c := kv.NewDB(log.AmbientContext{ Tracer: tracing.NewTracer(), - }, client.MockTxnSenderFactory{}, + }, kv.MockTxnSenderFactory{}, hlc.NewClock(hlc.UnixNano, time.Nanosecond)) - txn := client.NewTxn(context.TODO(), c, 0) + txn := kv.NewTxn(context.TODO(), c, 0) of := 
replicaoracle.NewOracleFactory(followerReadAwareChoice, replicaoracle.Config{ Settings: st, RPCContext: rpcContext, diff --git a/pkg/ccl/importccl/import_processor_test.go b/pkg/ccl/importccl/import_processor_test.go index b760bf11ddd2..d8aa2e6a58ba 100644 --- a/pkg/ccl/importccl/import_processor_test.go +++ b/pkg/ccl/importccl/import_processor_test.go @@ -23,9 +23,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/blobs" "github.com/cockroachdb/cockroach/pkg/ccl/backupccl" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -227,7 +227,7 @@ func TestImportIgnoresProcessedFiles(t *testing.T) { Settings: &cluster.Settings{}, ExternalStorage: externalStorageFactory, BulkAdder: func( - _ context.Context, _ *client.DB, _ hlc.Timestamp, + _ context.Context, _ *kv.DB, _ hlc.Timestamp, _ storagebase.BulkAdderOptions) (storagebase.BulkAdder, error) { return &doNothingKeyAdder{}, nil }, @@ -325,7 +325,7 @@ func TestImportHonorsResumePosition(t *testing.T) { Settings: &cluster.Settings{}, ExternalStorage: externalStorageFactory, BulkAdder: func( - _ context.Context, _ *client.DB, _ hlc.Timestamp, + _ context.Context, _ *kv.DB, _ hlc.Timestamp, opts storagebase.BulkAdderOptions) (storagebase.BulkAdder, error) { if opts.Name == "pkAdder" { return pkBulkAdder, nil diff --git a/pkg/ccl/importccl/import_stmt.go b/pkg/ccl/importccl/import_stmt.go index 13af08468f2b..37d48cee6b8c 100644 --- a/pkg/ccl/importccl/import_stmt.go +++ b/pkg/ccl/importccl/import_stmt.go @@ -18,10 +18,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/ccl/backupccl" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" @@ -658,7 +658,7 @@ func importPlanHook( Progress: jobspb.ImportProgress{}, } var sj *jobs.StartableJob - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { sj, err = p.ExecCfg().JobRegistry.CreateStartableJobWithTxn(ctx, jr, txn, resultsCh) if err != nil { return err @@ -784,7 +784,7 @@ type importResumer struct { // Prepares descriptors for newly created tables being imported into. func prepareNewTableDescsForIngestion( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, p sql.PlanHookState, tables []jobspb.ImportDetails_Table, parentID sqlbase.ID, @@ -850,7 +850,7 @@ func prepareNewTableDescsForIngestion( // Prepares descriptors for existing tables being imported into. 
func prepareExistingTableDescForIngestion( - ctx context.Context, txn *client.Txn, desc *sqlbase.TableDescriptor, p sql.PlanHookState, + ctx context.Context, txn *kv.Txn, desc *sqlbase.TableDescriptor, p sql.PlanHookState, ) (*sqlbase.TableDescriptor, error) { if len(desc.Mutations) > 0 { return nil, errors.Errorf("cannot IMPORT INTO a table with schema changes in progress -- try again later (pending mutation %s)", desc.Mutations[0].String()) @@ -900,7 +900,7 @@ func prepareExistingTableDescForIngestion( func (r *importResumer) prepareTableDescsForIngestion( ctx context.Context, p sql.PlanHookState, details jobspb.ImportDetails, ) error { - err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { importDetails := details importDetails.Tables = make([]jobspb.ImportDetails_Table, len(details.Tables)) @@ -1055,7 +1055,7 @@ func (r *importResumer) Resume( // successfully finished the import but failed to drop the protected // timestamp. The reconciliation loop ought to pick it up. if ptsID != nil && !r.testingKnobs.ignoreProtectedTimestamps { - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return r.releaseProtectedTimestamp(ctx, txn, p.ExecCfg().ProtectedTimestampProvider) }); err != nil { log.Errorf(ctx, "failed to release protected timestamp: %v", err) @@ -1087,7 +1087,7 @@ func (r *importResumer) publishTables(ctx context.Context, execCfg *sql.Executor log.Event(ctx, "making tables live") // Needed to trigger the schema change manager. - err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -1170,7 +1170,7 @@ func (r *importResumer) publishTables(ctx context.Context, execCfg *sql.Executor // stuff to delete the keys in the background. func (r *importResumer) OnFailOrCancel(ctx context.Context, phs interface{}) error { cfg := phs.(sql.PlanHookState).ExecCfg() - return cfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := r.dropTables(ctx, txn); err != nil { return err } @@ -1179,7 +1179,7 @@ func (r *importResumer) OnFailOrCancel(ctx context.Context, phs interface{}) err } func (r *importResumer) releaseProtectedTimestamp( - ctx context.Context, txn *client.Txn, pts protectedts.Storage, + ctx context.Context, txn *kv.Txn, pts protectedts.Storage, ) error { details := r.job.Details().(jobspb.ImportDetails) ptsID := details.ProtectedTimestampRecord @@ -1198,7 +1198,7 @@ func (r *importResumer) releaseProtectedTimestamp( } // dropTables implements the OnFailOrCancel logic. -func (r *importResumer) dropTables(ctx context.Context, txn *client.Txn) error { +func (r *importResumer) dropTables(ctx context.Context, txn *kv.Txn) error { details := r.job.Details().(jobspb.ImportDetails) // Needed to trigger the schema change manager. 
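Reviewer note: the hunks above and below are a mechanical rename of the pkg/internal/client package to pkg/kv; call sites keep the same shape, only the import path and the package identifier change. A minimal sketch of the two patterns that recur throughout this diff, assuming the kv package exposes the same DB/Txn/SendWrapped API as the old client package did (the function names runTxn and sendScatter below are illustrative only, not part of the change):

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// runTxn: transactional closures now take a *kv.Txn instead of a *client.Txn,
// and the handle is a *kv.DB.
func runTxn(ctx context.Context, db *kv.DB, key roachpb.Key) error {
	return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		_, err := txn.Get(ctx, key) // same Txn API, new package name
		return err
	})
}

// sendScatter: one-off requests go through kv.SendWrapped (formerly
// client.SendWrapped) against the DB's non-transactional sender, mirroring
// the RESTORE/scatter call sites above.
func sendScatter(ctx context.Context, db *kv.DB, span roachpb.Span) error {
	req := &roachpb.AdminScatterRequest{
		RequestHeader: roachpb.RequestHeaderFromSpan(span),
	}
	if _, pErr := kv.SendWrapped(ctx, db.NonTransactionalSender(), req); pErr != nil {
		return pErr.GoError()
	}
	return nil
}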
diff --git a/pkg/ccl/importccl/import_table_creation.go b/pkg/ccl/importccl/import_table_creation.go index 9328b6c8f565..3bcbf19df9a1 100644 --- a/pkg/ccl/importccl/import_table_creation.go +++ b/pkg/ccl/importccl/import_table_creation.go @@ -14,8 +14,8 @@ import ( "io/ioutil" "strings" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/parser" @@ -243,7 +243,7 @@ type fkResolver map[string]*sqlbase.MutableTableDescriptor var _ sql.SchemaResolver = fkResolver{} // Implements the sql.SchemaResolver interface. -func (r fkResolver) Txn() *client.Txn { +func (r fkResolver) Txn() *kv.Txn { return nil } diff --git a/pkg/ccl/importccl/load.go b/pkg/ccl/importccl/load.go index 9dd99c0100a4..ea6af1859647 100644 --- a/pkg/ccl/importccl/load.go +++ b/pkg/ccl/importccl/load.go @@ -21,8 +21,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/blobs" "github.com/cockroachdb/cockroach/pkg/ccl/backupccl" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" @@ -200,7 +200,7 @@ func Load( // A nil txn is safe because it is only used by sql.MakeTableDesc, which // only uses txn for resolving FKs and interleaved tables, neither of which // are present here. Ditto for the schema accessor. - var txn *client.Txn + var txn *kv.Txn // At this point the CREATE statements in the loaded SQL do not // use the SERIAL type so we need not process SERIAL types here. 
desc, err := sql.MakeTableDesc(ctx, txn, nil /* vt */, st, s, dbDesc.ID, keys.PublicSchemaID, diff --git a/pkg/ccl/importccl/load_test.go b/pkg/ccl/importccl/load_test.go index c2affbec4c00..c07cfece9d6b 100644 --- a/pkg/ccl/importccl/load_test.go +++ b/pkg/ccl/importccl/load_test.go @@ -18,7 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/importccl" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -56,7 +56,7 @@ func TestGetDescriptorFromDB(t *testing.T) { aliceDesc := &sqlbase.DatabaseDescriptor{Name: "alice"} bobDesc := &sqlbase.DatabaseDescriptor{Name: "bob"} - err := kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { batch := txn.NewBatch() batch.Put(sqlbase.NewDatabaseKey("bob").Key(), 9999) batch.Put(sqlbase.NewDeprecatedDatabaseKey("alice").Key(), 10000) diff --git a/pkg/ccl/storageccl/bench_test.go b/pkg/ccl/storageccl/bench_test.go index 931d8c19148f..a88a97b098ac 100644 --- a/pkg/ccl/storageccl/bench_test.go +++ b/pkg/ccl/storageccl/bench_test.go @@ -17,8 +17,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/ccl/utilccl/sampledataccl" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/storage" @@ -205,7 +205,7 @@ func BenchmarkImport(b *testing.B) { Files: files, Rekeys: rekeys, } - res, pErr := client.SendWrapped(ctx, kvDB.NonTransactionalSender(), req) + res, pErr := kv.SendWrapped(ctx, kvDB.NonTransactionalSender(), req) if pErr != nil { b.Fatalf("%+v", pErr.GoError()) } diff --git a/pkg/ccl/storageccl/export_test.go b/pkg/ccl/storageccl/export_test.go index aad3723db970..3858ad12f571 100644 --- a/pkg/ccl/storageccl/export_test.go +++ b/pkg/ccl/storageccl/export_test.go @@ -21,8 +21,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/storage" @@ -59,7 +59,7 @@ func TestExportCmd(t *testing.T) { ReturnSST: true, TargetFileSize: ExportRequestTargetFileSize.Get(&tc.Server(0).ClusterSettings().SV), } - return client.SendWrapped(ctx, kvDB.NonTransactionalSender(), req) + return kv.SendWrapped(ctx, kvDB.NonTransactionalSender(), req) } exportAndSlurpOne := func( @@ -272,7 +272,7 @@ func TestExportGCThreshold(t *testing.T) { RequestHeader: roachpb.RequestHeader{Key: keys.UserTableDataMin, EndKey: keys.MaxKey}, StartTime: hlc.Timestamp{WallTime: -1}, } - _, pErr := client.SendWrapped(ctx, kvDB.NonTransactionalSender(), req) + _, pErr := kv.SendWrapped(ctx, kvDB.NonTransactionalSender(), req) if !testutils.IsPError(pErr, "must be after replica GC threshold") { t.Fatalf(`expected "must be after replica GC threshold" error got: %+v`, pErr) } diff --git a/pkg/ccl/storageccl/import_test.go b/pkg/ccl/storageccl/import_test.go index ef0ee5538ebc..35fad6eec028 100644 --- 
a/pkg/ccl/storageccl/import_test.go +++ b/pkg/ccl/storageccl/import_test.go @@ -21,8 +21,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -114,7 +114,7 @@ func slurpSSTablesLatestKey( return kvs } -func clientKVsToEngineKVs(kvs []client.KeyValue) []storage.MVCCKeyValue { +func clientKVsToEngineKVs(kvs []kv.KeyValue) []storage.MVCCKeyValue { var ret []storage.MVCCKeyValue for _, kv := range kvs { if kv.Value == nil { @@ -341,7 +341,7 @@ func runTestImport(t *testing.T, init func(*cluster.Settings)) { // Import may be retried by DistSender if it takes too long to return, so // make sure it's idempotent. for j := 0; j < 2; j++ { - b := &client.Batch{} + b := &kv.Batch{} b.AddRawRequest(req) if err := kvDB.Run(ctx, b); err != nil { t.Fatalf("%+v", err) diff --git a/pkg/ccl/storageccl/revision_reader.go b/pkg/ccl/storageccl/revision_reader.go index af39069d2057..c3b9cf03a106 100644 --- a/pkg/ccl/storageccl/revision_reader.go +++ b/pkg/ccl/storageccl/revision_reader.go @@ -11,7 +11,7 @@ package storageccl import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -29,10 +29,7 @@ type VersionedValues struct { // revisions between startTime and endTime. // TODO(dt): if/when client gets a ScanRevisionsRequest or similar, use that. func GetAllRevisions( - ctx context.Context, - db *client.DB, - startKey, endKey roachpb.Key, - startTime, endTime hlc.Timestamp, + ctx context.Context, db *kv.DB, startKey, endKey roachpb.Key, startTime, endTime hlc.Timestamp, ) ([]VersionedValues, error) { // TODO(dt): version check. header := roachpb.Header{Timestamp: endTime} @@ -43,7 +40,7 @@ func GetAllRevisions( ReturnSST: true, OmitChecksum: true, } - resp, pErr := client.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req) + resp, pErr := kv.SendWrappedWith(ctx, db.NonTransactionalSender(), header, req) if pErr != nil { return nil, pErr.GoError() } diff --git a/pkg/cli/debug_test.go b/pkg/cli/debug_test.go index abedafe715b2..a3d89ca96c3f 100644 --- a/pkg/cli/debug_test.go +++ b/pkg/cli/debug_test.go @@ -23,8 +23,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -226,7 +226,7 @@ func TestRemoveDeadReplicas(t *testing.T) { // we restart the cluster, so just write a setting. s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") - txn := client.NewTxn(ctx, tc.Servers[0].DB(), 1) + txn := kv.NewTxn(ctx, tc.Servers[0].DB(), 1) var desc roachpb.RangeDescriptor // Pick one of the predefined split points. 
rdKey := keys.RangeDescriptorKey(roachpb.RKey(keys.TimeseriesPrefix)) diff --git a/pkg/internal/client/requestbatcher/batcher.go b/pkg/internal/client/requestbatcher/batcher.go index d28d20b843d7..41f65ae883d4 100644 --- a/pkg/internal/client/requestbatcher/batcher.go +++ b/pkg/internal/client/requestbatcher/batcher.go @@ -29,7 +29,7 @@ import ( "sync" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/contextutil" "github.com/cockroachdb/cockroach/pkg/util/stop" @@ -96,7 +96,7 @@ type Config struct { Name string // Sender can round-trip a batch. Sender must not be nil. - Sender client.Sender + Sender kv.Sender // Stopper controls the lifecycle of the Batcher. Stopper must not be nil. Stopper *stop.Stopper diff --git a/pkg/jobs/jobs.go b/pkg/jobs/jobs.go index 76e237df0a27..b3fef0b4fd70 100644 --- a/pkg/jobs/jobs.go +++ b/pkg/jobs/jobs.go @@ -16,8 +16,8 @@ import ( "reflect" "sync/atomic" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -38,7 +38,7 @@ type Job struct { registry *Registry id *int64 - txn *client.Txn + txn *kv.Txn mu struct { syncutil.Mutex payload jobspb.Payload @@ -66,7 +66,7 @@ type Record struct { // See Registry.CreateStartableJob type StartableJob struct { *Job - txn *client.Txn + txn *kv.Txn resumer Resumer resumerCtx context.Context cancel context.CancelFunc @@ -174,7 +174,7 @@ func (j *Job) Created(ctx context.Context) error { // Started marks the tracked job as started. func (j *Job) Started(ctx context.Context) error { - return j.Update(ctx, func(_ *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(_ *kv.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status != StatusPending && md.Status != StatusRunning { return errors.Errorf("job with status %s cannot be marked started", md.Status) } @@ -190,14 +190,14 @@ func (j *Job) Started(ctx context.Context) error { // CheckStatus verifies the status of the job and returns an error if the job's // status isn't Running or Reverting. func (j *Job) CheckStatus(ctx context.Context) error { - return j.Update(ctx, func(_ *client.Txn, md JobMetadata, _ *JobUpdater) error { + return j.Update(ctx, func(_ *kv.Txn, md JobMetadata, _ *JobUpdater) error { return md.CheckRunningOrReverting() }) } // CheckTerminalStatus returns true if the job is in a terminal status. func (j *Job) CheckTerminalStatus(ctx context.Context) bool { - err := j.Update(ctx, func(_ *client.Txn, md JobMetadata, _ *JobUpdater) error { + err := j.Update(ctx, func(_ *kv.Txn, md JobMetadata, _ *JobUpdater) error { if !md.Status.Terminal() { return &InvalidStatusError{md.ID, md.Status, "checking that job status is success", md.Payload.Error} } @@ -211,7 +211,7 @@ func (j *Job) CheckTerminalStatus(ctx context.Context) bool { // It sets the job's RunningStatus field to the value returned by runningStatusFn // and persists runningStatusFn's modifications to the job's details, if any. 
func (j *Job) RunningStatus(ctx context.Context, runningStatusFn RunningStatusFn) error { - return j.Update(ctx, func(_ *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(_ *kv.Txn, md JobMetadata, ju *JobUpdater) error { if err := md.CheckRunningOrReverting(); err != nil { return err } @@ -227,7 +227,7 @@ func (j *Job) RunningStatus(ctx context.Context, runningStatusFn RunningStatusFn // SetDescription updates the description of a created job. func (j *Job) SetDescription(ctx context.Context, updateFn DescriptionUpdateFn) error { - return j.Update(ctx, func(_ *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(_ *kv.Txn, md JobMetadata, ju *JobUpdater) error { prev := md.Payload.Description desc, err := updateFn(ctx, prev) if err != nil { @@ -274,7 +274,7 @@ type HighWaterProgressedFn func(ctx context.Context, details jobspb.ProgressDeta // Jobs for which progress computations do not depend on their details can // use the FractionUpdater helper to construct a ProgressedFn. func (j *Job) FractionProgressed(ctx context.Context, progressedFn FractionProgressedFn) error { - return j.Update(ctx, func(_ *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(_ *kv.Txn, md JobMetadata, ju *JobUpdater) error { if err := md.CheckRunningOrReverting(); err != nil { return err } @@ -301,7 +301,7 @@ func (j *Job) FractionProgressed(ctx context.Context, progressedFn FractionProgr // job's HighWater field to the value returned by progressedFn and persists // progressedFn's modifications to the job's progress details, if any. func (j *Job) HighWaterProgressed(ctx context.Context, progressedFn HighWaterProgressedFn) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { if err := md.CheckRunningOrReverting(); err != nil { return err } @@ -323,8 +323,8 @@ func (j *Job) HighWaterProgressed(ctx context.Context, progressedFn HighWaterPro // Paused sets the status of the tracked job to paused. It does not directly // pause the job; instead, it expects the job to call job.Progressed soon, // observe a "job is paused" error, and abort further work. -func (j *Job) Paused(ctx context.Context, fn func(context.Context, *client.Txn) error) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { +func (j *Job) Paused(ctx context.Context, fn func(context.Context, *kv.Txn) error) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusPaused { // Already paused - do nothing. return nil @@ -347,7 +347,7 @@ func (j *Job) Paused(ctx context.Context, fn func(context.Context, *client.Txn) // expires the job's lease so that a Registry adoption loop detects it and // resumes it. func (j *Job) resumed(ctx context.Context) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusRunning || md.Status == StatusReverting { // Already resumed - do nothing. return nil @@ -376,10 +376,8 @@ func (j *Job) resumed(ctx context.Context) error { // Further the node the runs the job will actively cancel it when it notices // that it is in state StatusCancelRequested and will move it to state // StatusReverting. 
-func (j *Job) cancelRequested( - ctx context.Context, fn func(context.Context, *client.Txn) error, -) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { +func (j *Job) cancelRequested(ctx context.Context, fn func(context.Context, *kv.Txn) error) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { if md.Payload.Noncancelable { return errors.Newf("job %d: not cancelable", j.ID()) } @@ -407,10 +405,8 @@ func (j *Job) cancelRequested( // not directly pause the job; it expects the node that runs the job will // actively cancel it when it notices that it is in state StatusPauseRequested // and will move it to state StatusPaused. -func (j *Job) pauseRequested( - ctx context.Context, fn func(context.Context, *client.Txn) error, -) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { +func (j *Job) pauseRequested(ctx context.Context, fn func(context.Context, *kv.Txn) error) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusPauseRequested || md.Status == StatusPaused { return nil } @@ -429,9 +425,9 @@ func (j *Job) pauseRequested( // Reverted sets the status of the tracked job to reverted. func (j *Job) Reverted( - ctx context.Context, err error, fn func(context.Context, *client.Txn) error, + ctx context.Context, err error, fn func(context.Context, *kv.Txn) error, ) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusReverting { return nil } @@ -460,8 +456,8 @@ func (j *Job) Reverted( } // Canceled sets the status of the tracked job to cancel. -func (j *Job) canceled(ctx context.Context, fn func(context.Context, *client.Txn) error) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { +func (j *Job) canceled(ctx context.Context, fn func(context.Context, *kv.Txn) error) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusCanceled { return nil } @@ -482,9 +478,9 @@ func (j *Job) canceled(ctx context.Context, fn func(context.Context, *client.Txn // Failed marks the tracked job as having failed with the given error. func (j *Job) Failed( - ctx context.Context, err error, fn func(context.Context, *client.Txn) error, + ctx context.Context, err error, fn func(context.Context, *kv.Txn) error, ) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { // TODO(spaskob): should we fail if the terminal state is not StatusFailed? if md.Status.Terminal() { // Already done - do nothing. @@ -505,8 +501,8 @@ func (j *Job) Failed( // Succeeded marks the tracked job as having succeeded and sets its fraction // completed to 1.0. 
-func (j *Job) Succeeded(ctx context.Context, fn func(context.Context, *client.Txn) error) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { +func (j *Job) Succeeded(ctx context.Context, fn func(context.Context, *kv.Txn) error) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { if md.Status == StatusSucceeded { return nil } @@ -531,7 +527,7 @@ func (j *Job) Succeeded(ctx context.Context, fn func(context.Context, *client.Tx // SetDetails sets the details field of the currently running tracked job. func (j *Job) SetDetails(ctx context.Context, details interface{}) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { md.Payload.Details = jobspb.WrapPayloadDetails(details) ju.UpdatePayload(md.Payload) return nil @@ -540,7 +536,7 @@ func (j *Job) SetDetails(ctx context.Context, details interface{}) error { // SetProgress sets the details field of the currently running tracked job. func (j *Job) SetProgress(ctx context.Context, details interface{}) error { - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { md.Progress.Details = jobspb.WrapProgressDetails(details) ju.UpdateProgress(md.Progress) return nil @@ -588,12 +584,12 @@ func (j *Job) FractionCompleted() float32 { // WithTxn sets the transaction that this Job will use for its next operation. // If the transaction is nil, the Job will create a one-off transaction instead. // If you use WithTxn, this Job will no longer be threadsafe. -func (j *Job) WithTxn(txn *client.Txn) *Job { +func (j *Job) WithTxn(txn *kv.Txn) *Job { j.txn = txn return j } -func (j *Job) runInTxn(ctx context.Context, fn func(context.Context, *client.Txn) error) error { +func (j *Job) runInTxn(ctx context.Context, fn func(context.Context, *kv.Txn) error) error { if j.txn != nil { defer func() { j.txn = nil }() // Don't run fn in a retry loop because we need retryable errors to @@ -625,7 +621,7 @@ func HasJobNotFoundError(err error) bool { func (j *Job) load(ctx context.Context) error { var payload *jobspb.Payload var progress *jobspb.Progress - if err := j.runInTxn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := j.runInTxn(ctx, func(ctx context.Context, txn *kv.Txn) error { const stmt = "SELECT payload, progress FROM system.jobs WHERE id = $1" row, err := j.registry.ex.QueryRowEx( ctx, "load-job-query", txn, sqlbase.InternalExecutorSessionDataOverride{User: security.RootUser}, @@ -658,7 +654,7 @@ func (j *Job) insert(ctx context.Context, id int64, lease *jobspb.Lease) error { j.mu.payload.Lease = lease - if err := j.runInTxn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := j.runInTxn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Note: although the following uses ReadTimestamp and // ReadTimestamp can diverge from the value of now() throughout a // transaction, this may be OK -- we merely required ModifiedMicro @@ -687,7 +683,7 @@ func (j *Job) insert(ctx context.Context, id int64, lease *jobspb.Lease) error { func (j *Job) adopt(ctx context.Context, oldLease *jobspb.Lease) error { log.Infof(ctx, "job %d: adopting", *j.ID()) - return j.Update(ctx, func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error { + return j.Update(ctx, func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error { if 
!md.Payload.Lease.Equal(oldLease) { return errors.Errorf("current lease %v did not match expected lease %v", md.Payload.Lease, oldLease) @@ -737,7 +733,7 @@ func (j *Job) CurrentStatus(ctx context.Context) (Status, error) { return "", errors.New("job has not been created") } var statusString tree.DString - if err := j.runInTxn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := j.runInTxn(ctx, func(ctx context.Context, txn *kv.Txn) error { const selectStmt = "SELECT status FROM system.jobs WHERE id = $1" row, err := j.registry.ex.QueryRow(ctx, "job-status", txn, selectStmt, *j.ID()) if err != nil { diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go index d950ef2bb666..b6b328569e87 100644 --- a/pkg/jobs/jobs_test.go +++ b/pkg/jobs/jobs_test.go @@ -22,9 +22,9 @@ import ( "github.com/cockroachdb/apd" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql" @@ -1967,7 +1967,7 @@ func TestStartableJob(t *testing.T) { Progress: jobspb.RestoreProgress{}, } createStartableJob := func(t *testing.T) (sj *jobs.StartableJob) { - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { sj, err = jr.CreateStartableJobWithTxn(ctx, rec, txn, nil) return err })) diff --git a/pkg/jobs/jobsprotectedts/jobs_protected_ts.go b/pkg/jobs/jobsprotectedts/jobs_protected_ts.go index 77c8ffbc5f9a..710644ff03d0 100644 --- a/pkg/jobs/jobsprotectedts/jobs_protected_ts.go +++ b/pkg/jobs/jobsprotectedts/jobs_protected_ts.go @@ -14,8 +14,8 @@ import ( "context" "strconv" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptreconcile" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -33,7 +33,7 @@ const MetaType = "jobs" // MakeStatusFunc returns a function which determines whether the job implied // with this value of meta should be removed by the reconciler. 
func MakeStatusFunc(jr *jobs.Registry) ptreconcile.StatusFunc { - return func(ctx context.Context, txn *client.Txn, meta []byte) (shouldRemove bool, _ error) { + return func(ctx context.Context, txn *kv.Txn, meta []byte) (shouldRemove bool, _ error) { jobID, err := decodeJobID(meta) if err != nil { return false, err diff --git a/pkg/jobs/jobsprotectedts/jobs_protected_ts_test.go b/pkg/jobs/jobsprotectedts/jobs_protected_ts_test.go index 08cc7e02f29a..05be3fea7a6f 100644 --- a/pkg/jobs/jobsprotectedts/jobs_protected_ts_test.go +++ b/pkg/jobs/jobsprotectedts/jobs_protected_ts_test.go @@ -17,11 +17,11 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -71,7 +71,7 @@ func TestJobsProtectedTimestamp(t *testing.T) { } mkJobAndRecord := func() (j *jobs.Job, rec *ptpb.Record) { ts := s0.Clock().Now() - require.NoError(t, s0.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + require.NoError(t, s0.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { if j, err = jr.CreateJobWithTxn(ctx, mkJobRec(), txn); err != nil { return err } @@ -81,15 +81,15 @@ func TestJobsProtectedTimestamp(t *testing.T) { return j, rec } jMovedToFailed, recMovedToFailed := mkJobAndRecord() - require.NoError(t, jMovedToFailed.Failed(ctx, io.ErrUnexpectedEOF, func(ctx context.Context, txn *client.Txn) error { + require.NoError(t, jMovedToFailed.Failed(ctx, io.ErrUnexpectedEOF, func(ctx context.Context, txn *kv.Txn) error { return nil })) jFinished, recFinished := mkJobAndRecord() - require.NoError(t, jFinished.Succeeded(ctx, func(ctx context.Context, txn *client.Txn) error { + require.NoError(t, jFinished.Succeeded(ctx, func(ctx context.Context, txn *kv.Txn) error { return nil })) _, recRemains := mkJobAndRecord() - ensureNotExists := func(ctx context.Context, txn *client.Txn, ptsID uuid.UUID) (err error) { + ensureNotExists := func(ctx context.Context, txn *kv.Txn, ptsID uuid.UUID) (err error) { _, err = ptp.GetRecord(ctx, txn, ptsID) if err == protectedts.ErrNotExists { return nil @@ -97,7 +97,7 @@ func TestJobsProtectedTimestamp(t *testing.T) { return fmt.Errorf("waiting for %v, got %v", protectedts.ErrNotExists, err) } testutils.SucceedsSoon(t, func() (err error) { - return s0.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return s0.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := ensureNotExists(ctx, txn, recMovedToFailed.ID); err != nil { return err } diff --git a/pkg/jobs/registry.go b/pkg/jobs/registry.go index e87092ec3d32..6830465c9ead 100644 --- a/pkg/jobs/registry.go +++ b/pkg/jobs/registry.go @@ -19,8 +19,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagepb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -95,7 +95,7 @@ type NodeLiveness interface { type Registry struct { ac 
log.AmbientContext stopper *stop.Stopper - db *client.DB + db *kv.DB ex sqlutil.InternalExecutor clock *hlc.Clock nodeID *base.NodeIDContainer @@ -152,7 +152,7 @@ func MakeRegistry( ac log.AmbientContext, stopper *stop.Stopper, clock *hlc.Clock, - db *client.DB, + db *kv.DB, ex sqlutil.InternalExecutor, nodeID *base.NodeIDContainer, settings *cluster.Settings, @@ -231,7 +231,7 @@ func (r *Registry) CreateAndStartJob( ctx context.Context, resultsCh chan<- tree.Datums, record Record, ) (*Job, <-chan error, error) { var rj *StartableJob - if err := r.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { rj, err = r.CreateStartableJobWithTxn(ctx, record, txn, resultsCh) return err }); err != nil { @@ -329,9 +329,7 @@ func (r *Registry) NewJob(record Record) *Job { // CreateJobWithTxn creates a job to be started later with StartJob. // It stores the job in the jobs table, marks it pending and gives the // current node a lease. -func (r *Registry) CreateJobWithTxn( - ctx context.Context, record Record, txn *client.Txn, -) (*Job, error) { +func (r *Registry) CreateJobWithTxn(ctx context.Context, record Record, txn *kv.Txn) (*Job, error) { j := r.NewJob(record) if err := j.WithTxn(txn).insert(ctx, r.makeJobID(), r.newLease()); err != nil { return nil, err @@ -353,7 +351,7 @@ func (r *Registry) CreateJobWithTxn( // back then the caller must call CleanupOnRollback to unregister the job from // the Registry. func (r *Registry) CreateStartableJobWithTxn( - ctx context.Context, record Record, txn *client.Txn, resultsCh chan<- tree.Datums, + ctx context.Context, record Record, txn *kv.Txn, resultsCh chan<- tree.Datums, ) (*StartableJob, error) { j, err := r.CreateJobWithTxn(ctx, record, txn) if err != nil { @@ -391,7 +389,7 @@ func (r *Registry) LoadJob(ctx context.Context, jobID int64) (*Job, error) { // LoadJobWithTxn does the same as above, but using the transaction passed in // the txn argument. Passing a nil transaction is equivalent to calling LoadJob // in that a transaction will be automatically created. -func (r *Registry) LoadJobWithTxn(ctx context.Context, jobID int64, txn *client.Txn) (*Job, error) { +func (r *Registry) LoadJobWithTxn(ctx context.Context, jobID int64, txn *kv.Txn) (*Job, error) { j := &Job{ id: &jobID, registry: r, @@ -522,7 +520,7 @@ func (r *Registry) isOrphaned(ctx context.Context, payload *jobspb.Payload) (boo } for _, id := range payload.DescriptorIDs { pendingMutations := false - if err := r.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { td, err := sqlbase.GetTableDescFromID(ctx, txn, id) if err != nil { return err @@ -597,7 +595,7 @@ func (r *Registry) cleanupOldJobs(ctx context.Context, olderThan time.Time) erro // getJobFn attempts to get a resumer from the given job id. If the job id // does not have a resumer then it returns an error message suitable for users. -func (r *Registry) getJobFn(ctx context.Context, txn *client.Txn, id int64) (*Job, Resumer, error) { +func (r *Registry) getJobFn(ctx context.Context, txn *kv.Txn, id int64) (*Job, Resumer, error) { job, err := r.LoadJobWithTxn(ctx, id, txn) if err != nil { return nil, nil, err @@ -610,7 +608,7 @@ func (r *Registry) getJobFn(ctx context.Context, txn *client.Txn, id int64) (*Jo } // CancelRequested marks the job as cancel-requested using the specified txn (may be nil). 
-func (r *Registry) CancelRequested(ctx context.Context, txn *client.Txn, id int64) error { +func (r *Registry) CancelRequested(ctx context.Context, txn *kv.Txn, id int64) error { job, _, err := r.getJobFn(ctx, txn, id) if err != nil { // Special case schema change jobs to mark the job as canceled. @@ -635,7 +633,7 @@ func (r *Registry) CancelRequested(ctx context.Context, txn *client.Txn, id int6 } // PauseRequested marks the job with id as paused-requested using the specified txn (may be nil). -func (r *Registry) PauseRequested(ctx context.Context, txn *client.Txn, id int64) error { +func (r *Registry) PauseRequested(ctx context.Context, txn *kv.Txn, id int64) error { job, _, err := r.getJobFn(ctx, txn, id) if err != nil { return err @@ -644,7 +642,7 @@ func (r *Registry) PauseRequested(ctx context.Context, txn *client.Txn, id int64 } // Resume resumes the paused job with id using the specified txn (may be nil). -func (r *Registry) Resume(ctx context.Context, txn *client.Txn, id int64) error { +func (r *Registry) Resume(ctx context.Context, txn *kv.Txn, id int64) error { job, _, err := r.getJobFn(ctx, txn, id) if err != nil { return err @@ -990,7 +988,7 @@ WHERE status IN ($1, $2, $3, $4, $5) ORDER BY created DESC` // if multiple nodes execute this. const updateStmt = `UPDATE system.jobs SET status = $1, payload = $2 WHERE id = $3` updateArgs := []interface{}{StatusFailed, payloadBytes, *id} - err = r.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { _, err := r.ex.Exec(ctx, "job-update", txn, updateStmt, updateArgs...) return err }) @@ -1032,7 +1030,7 @@ WHERE status IN ($1, $2, $3, $4, $5) ORDER BY created DESC` resumeCtx, cancel := r.makeCtx() if pauseRequested := status == StatusPauseRequested; pauseRequested { - if err := job.Paused(ctx, func(context.Context, *client.Txn) error { + if err := job.Paused(ctx, func(context.Context, *kv.Txn) error { r.unregister(*id) return nil }); err != nil { @@ -1044,7 +1042,7 @@ WHERE status IN ($1, $2, $3, $4, $5) ORDER BY created DESC` } if cancelRequested := status == StatusCancelRequested; cancelRequested { - if err := job.Reverted(ctx, errJobCanceled, func(context.Context, *client.Txn) error { + if err := job.Reverted(ctx, errJobCanceled, func(context.Context, *kv.Txn) error { // Unregister the job in case it is running on the node. // Unregister is a no-op for jobs that are not running. r.unregister(*id) diff --git a/pkg/jobs/registry_test.go b/pkg/jobs/registry_test.go index 4f611a73860e..9483345a1037 100644 --- a/pkg/jobs/registry_test.go +++ b/pkg/jobs/registry_test.go @@ -18,8 +18,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -47,7 +47,7 @@ func TestRegistryCancelation(t *testing.T) { // of a dep cycle. const histogramWindowInterval = 60 * time.Second - var db *client.DB + var db *kv.DB // Insulate this test from wall time. 
mClock := hlc.NewManualClock(hlc.UnixNano()) clock := hlc.NewClock(mClock.UnixNano, time.Nanosecond) diff --git a/pkg/jobs/update.go b/pkg/jobs/update.go index d35a50b7ebf0..dedf97bc8d7d 100644 --- a/pkg/jobs/update.go +++ b/pkg/jobs/update.go @@ -15,8 +15,8 @@ import ( "fmt" "strings" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -32,7 +32,7 @@ import ( // // The function is free to modify contents of JobMetadata in place (but the // changes will be ignored unless JobUpdater is used). -type UpdateFn func(txn *client.Txn, md JobMetadata, ju *JobUpdater) error +type UpdateFn func(txn *kv.Txn, md JobMetadata, ju *JobUpdater) error // JobMetadata groups the job metadata values passed to UpdateFn. type JobMetadata struct { @@ -104,7 +104,7 @@ func (j *Job) Update(ctx context.Context, updateFn UpdateFn) error { var payload *jobspb.Payload var progress *jobspb.Progress - if err := j.runInTxn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := j.runInTxn(ctx, func(ctx context.Context, txn *kv.Txn) error { const selectStmt = "SELECT status, payload, progress FROM system.jobs WHERE id = $1" row, err := j.registry.ex.QueryRowEx( ctx, "log-job", txn, sqlbase.InternalExecutorSessionDataOverride{User: security.RootUser}, diff --git a/pkg/kv/batch.go b/pkg/kv/batch.go index 4667843b4c01..665ebbdbdc57 100644 --- a/pkg/kv/batch.go +++ b/pkg/kv/batch.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package client +package kv import ( "context" diff --git a/pkg/kv/bulk/sst_batcher_test.go b/pkg/kv/bulk/sst_batcher_test.go index 420ce3c3aa12..edf25c926c28 100644 --- a/pkg/kv/bulk/sst_batcher_test.go +++ b/pkg/kv/bulk/sst_batcher_test.go @@ -20,8 +20,8 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/bulk" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" @@ -186,7 +186,7 @@ func runTestImport(t *testing.T, batchSizeValue int64) { defer b.Close(ctx) - var expected []client.KeyValue + var expected []kv.KeyValue // Since the batcher automatically handles any retries due to spanning the // range-bounds internally, it can be difficult to observe from outside if @@ -215,7 +215,7 @@ func runTestImport(t *testing.T, batchSizeValue int64) { if err := b.Add(addCtx, k, v.RawBytes); err != nil { t.Fatal(err) } - expected = append(expected, client.KeyValue{Key: k, Value: &v}) + expected = append(expected, kv.KeyValue{Key: k, Value: &v}) } if err := b.Flush(addCtx); err != nil { t.Fatal(err) diff --git a/pkg/kv/client_test.go b/pkg/kv/client_test.go index 0c07e878f8aa..f5b8f545b44a 100644 --- a/pkg/kv/client_test.go +++ b/pkg/kv/client_test.go @@ -11,7 +11,7 @@ /* Package client_test tests clients against a fully-instantiated cockroach cluster (a single node, but bootstrapped, gossiped, etc.). 
*/ -package client_test +package kv_test import ( "bytes" @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -50,7 +50,7 @@ var errInfo = testutils.MakeCaller(3, 2) // values. The values can be either integers or strings; the expected results // are passed as alternating keys and values, e.g: // checkScanResult(t, result, key1, val1, key2, val2) -func checkKVs(t *testing.T, kvs []client.KeyValue, expected ...interface{}) { +func checkKVs(t *testing.T, kvs []kv.KeyValue, expected ...interface{}) { expLen := len(expected) / 2 if expLen != len(kvs) { t.Errorf("%s: expected %d scan results, got %d", errInfo(), expLen, len(kvs)) @@ -80,7 +80,7 @@ func checkKVs(t *testing.T, kvs []client.KeyValue, expected ...interface{}) { } } -func createTestClient(t *testing.T, s serverutils.TestServerInterface) *client.DB { +func createTestClient(t *testing.T, s serverutils.TestServerInterface) *kv.DB { return s.DB() } @@ -143,7 +143,7 @@ func TestClientRetryNonTxn(t *testing.T) { // doneCall signals when the non-txn read or write has completed. doneCall := make(chan error) count := 0 // keeps track of retries - err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if test.canPush { if err := txn.SetUserPriority(roachpb.MinUserPriority); err != nil { t.Fatal(err) @@ -241,13 +241,13 @@ func TestClientRunTransaction(t *testing.T) { value := []byte("value") key := []byte(fmt.Sprintf("%s/key-%t", testUser, commit)) - err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { // Put transactional value. if err := txn.Put(ctx, key, value); err != nil { return err } // Attempt to read in another txn. - conflictTxn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + conflictTxn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) conflictTxn.TestingSetPriority(enginepb.MaxTxnPriority) if gr, err := conflictTxn.Get(ctx, key); err != nil { return err @@ -404,7 +404,7 @@ func TestClientBatch(t *testing.T) { keys := []roachpb.Key{} { - b := &client.Batch{} + b := &kv.Batch{} for i := 0; i < 10; i++ { key := roachpb.Key(fmt.Sprintf("%s/key %02d", testUser, i)) keys = append(keys, key) @@ -424,7 +424,7 @@ func TestClientBatch(t *testing.T) { // Now try 2 scans. { - b := &client.Batch{} + b := &kv.Batch{} b.Scan(testUser+"/key 00", testUser+"/key 05") b.Scan(testUser+"/key 05", testUser+"/key 10") if err := db.Run(ctx, b); err != nil { @@ -436,7 +436,7 @@ func TestClientBatch(t *testing.T) { // Try a limited batch of 2 scans. { - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = 7 b.Scan(testUser+"/key 00", testUser+"/key 05") b.Scan(testUser+"/key 05", testUser+"/key 10") @@ -449,7 +449,7 @@ func TestClientBatch(t *testing.T) { // Try a limited batch of 2 scans. { - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = 7 b.Scan(testUser+"/key 05", testUser+"/key 10") b.Scan(testUser+"/key 00", testUser+"/key 05") @@ -462,7 +462,7 @@ func TestClientBatch(t *testing.T) { // Try a limited batch of 2 scans. 
{ - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = 3 b.Scan(testUser+"/key 00", testUser+"/key 05") b.Scan(testUser+"/key 05", testUser+"/key 10") @@ -475,7 +475,7 @@ func TestClientBatch(t *testing.T) { // Try 2 reverse scans. { - b := &client.Batch{} + b := &kv.Batch{} b.ReverseScan(testUser+"/key 00", testUser+"/key 05") b.ReverseScan(testUser+"/key 05", testUser+"/key 10") if err := db.Run(ctx, b); err != nil { @@ -487,7 +487,7 @@ func TestClientBatch(t *testing.T) { // Try a limited batch of 2 reverse scans. { - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = 7 b.ReverseScan(testUser+"/key 00", testUser+"/key 05") b.ReverseScan(testUser+"/key 05", testUser+"/key 10") @@ -500,7 +500,7 @@ func TestClientBatch(t *testing.T) { // Try a limited batch of 2 reverse scans. { - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = 7 b.ReverseScan(testUser+"/key 05", testUser+"/key 10") b.ReverseScan(testUser+"/key 00", testUser+"/key 05") @@ -513,7 +513,7 @@ func TestClientBatch(t *testing.T) { // Try a limited batch of 2 reverse scans. { - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = 3 b.ReverseScan(testUser+"/key 00", testUser+"/key 05") b.ReverseScan(testUser+"/key 05", testUser+"/key 10") @@ -531,7 +531,7 @@ func TestClientBatch(t *testing.T) { t.Fatal(err) } - b := &client.Batch{} + b := &kv.Batch{} b.CPut(key, "goodbyte", nil) // should fail if err := db.Run(ctx, b); err == nil { t.Error("unexpected success") @@ -556,9 +556,9 @@ func TestClientBatch(t *testing.T) { t.Fatal(err) } - b := &client.Batch{} + b := &kv.Batch{} b.CPut(key, "goodbyte", nil) // should fail - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return txn.Run(ctx, b) }); err == nil { t.Error("unexpected success") @@ -581,7 +581,7 @@ func TestClientBatch(t *testing.T) { // read the integers stored at the other's key and add it onto their own. // It is checked that the outcome is serializable, i.e. exactly one of the // two Goroutines (the later write) sees the previous write by the other. -func concurrentIncrements(db *client.DB, t *testing.T) { +func concurrentIncrements(db *kv.DB, t *testing.T) { // wgStart waits for all transactions to line up, wgEnd has the main // function wait for them to finish. var wgStart, wgEnd sync.WaitGroup @@ -598,7 +598,7 @@ func concurrentIncrements(db *client.DB, t *testing.T) { // Wait until the other goroutines are running. wgStart.Wait() - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { txn.SetDebugName(fmt.Sprintf("test-%d", i)) // Retrieve the other key. @@ -681,7 +681,7 @@ func TestReadConsistencyTypes(t *testing.T) { t.Run(rc.String(), func(t *testing.T) { // Mock out DistSender's sender function to check the read consistency for // outgoing BatchRequests and return an empty reply. 
- factory := client.NonTransactionalFactoryFunc( + factory := kv.NonTransactionalFactoryFunc( func(_ context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if ba.ReadConsistency != rc { @@ -691,11 +691,11 @@ func TestReadConsistencyTypes(t *testing.T) { }) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) ctx := context.TODO() - prepWithRC := func() *client.Batch { - b := &client.Batch{} + prepWithRC := func() *kv.Batch { + b := &kv.Batch{} b.Header.ReadConsistency = rc return b } @@ -722,7 +722,7 @@ func TestReadConsistencyTypes(t *testing.T) { { key := roachpb.Key([]byte("key")) - b := &client.Batch{} + b := &kv.Batch{} b.Header.ReadConsistency = rc b.Get(key) if err := db.Run(ctx, b); err != nil { @@ -741,7 +741,7 @@ func TestTxn_ReverseScan(t *testing.T) { db := createTestClient(t, s) keys := []roachpb.Key{} - b := &client.Batch{} + b := &kv.Batch{} for i := 0; i < 10; i++ { key := roachpb.Key(fmt.Sprintf("%s/key/%02d", testUser, i)) keys = append(keys, key) @@ -752,7 +752,7 @@ func TestTxn_ReverseScan(t *testing.T) { } // Try reverse scans for all keys. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { rows, err := txn.ReverseScan(ctx, testUser+"/key/00", testUser+"/key/10", 100) if err != nil { return err @@ -766,7 +766,7 @@ func TestTxn_ReverseScan(t *testing.T) { } // Try reverse scans for half of the keys. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { rows, err := txn.ReverseScan(ctx, testUser+"/key/00", testUser+"/key/05", 100) if err != nil { return err @@ -778,7 +778,7 @@ func TestTxn_ReverseScan(t *testing.T) { } // Try limit maximum rows. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { rows, err := txn.ReverseScan(ctx, testUser+"/key/00", testUser+"/key/05", 3) if err != nil { return err @@ -790,7 +790,7 @@ func TestTxn_ReverseScan(t *testing.T) { } // Try reverse scan with the same start and end key. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { rows, err := txn.ReverseScan(ctx, testUser+"/key/00", testUser+"/key/00", 100) if len(rows) > 0 { t.Errorf("expected empty, got %v", rows) @@ -803,7 +803,7 @@ func TestTxn_ReverseScan(t *testing.T) { } // Try reverse scan with non-existent key. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { rows, err := txn.ReverseScan(ctx, testUser+"/key/aa", testUser+"/key/bb", 100) if err != nil { return err @@ -822,33 +822,33 @@ func TestNodeIDAndObservedTimestamps(t *testing.T) { // Mock out sender function to check that created transactions // have the observed timestamp set for the configured node ID. 
- factory := client.MakeMockTxnSenderFactory( + factory := kv.MakeMockTxnSenderFactory( func(_ context.Context, _ *roachpb.Transaction, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { return ba.CreateReply(), nil }) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - dbCtx := client.DefaultDBContext() + dbCtx := kv.DefaultDBContext() dbCtx.NodeID = &base.NodeIDContainer{} - db := client.NewDBWithContext(testutils.MakeAmbientCtx(), factory, clock, dbCtx) + db := kv.NewDBWithContext(testutils.MakeAmbientCtx(), factory, clock, dbCtx) ctx := context.Background() // Verify direct creation of Txns. directCases := []struct { - typ client.TxnType + typ kv.TxnType nodeID roachpb.NodeID expObserved bool }{ - {typ: client.RootTxn, nodeID: 0, expObserved: false}, - {typ: client.RootTxn, nodeID: 1, expObserved: true}, - {typ: client.LeafTxn, nodeID: 0, expObserved: false}, - {typ: client.LeafTxn, nodeID: 1, expObserved: false}, + {typ: kv.RootTxn, nodeID: 0, expObserved: false}, + {typ: kv.RootTxn, nodeID: 1, expObserved: true}, + {typ: kv.LeafTxn, nodeID: 0, expObserved: false}, + {typ: kv.LeafTxn, nodeID: 1, expObserved: false}, } for i, test := range directCases { t.Run(fmt.Sprintf("direct-txn-%d", i), func(t *testing.T) { now := db.Clock().Now() kvTxn := roachpb.MakeTransaction("unnamed", nil /*baseKey*/, roachpb.NormalUserPriority, now, db.Clock().MaxOffset().Nanoseconds()) - txn := client.NewTxnFromProto(ctx, db, test.nodeID, now, test.typ, &kvTxn) + txn := kv.NewTxnFromProto(ctx, db, test.nodeID, now, test.typ, &kvTxn) ots := txn.TestingCloneTxn().ObservedTimestamps if (len(ots) == 1 && ots[0].NodeID == test.nodeID) != test.expObserved { t.Errorf("expected observed ts %t; got %+v", test.expObserved, ots) @@ -870,7 +870,7 @@ func TestNodeIDAndObservedTimestamps(t *testing.T) { dbCtx.NodeID.Set(ctx, test.nodeID) } if err := db.Txn( - ctx, func(_ context.Context, txn *client.Txn) error { + ctx, func(_ context.Context, txn *kv.Txn) error { ots := txn.TestingCloneTxn().ObservedTimestamps if (len(ots) == 1 && ots[0].NodeID == test.nodeID) != test.expObserved { t.Errorf("expected observed ts %t; got %+v", test.expObserved, ots) @@ -910,7 +910,7 @@ func TestIntentCleanupUnblocksReaders(t *testing.T) { close(done) }() - err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, key, "txn-value"); err != nil { close(block) return err @@ -948,7 +948,7 @@ func TestRollbackWithCanceledContextBasic(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) key := roachpb.Key("a") - err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, key, "txn-value"); err != nil { return err } @@ -1007,7 +1007,7 @@ func TestRollbackWithCanceledContextInsidious(t *testing.T) { base.TestServerArgs{Knobs: base.TestingKnobs{Store: &storeKnobs}}) defer s.Stopper().Stop(context.Background()) - err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, key, "txn-value"); err != nil { return err } diff --git a/pkg/kv/db.go b/pkg/kv/db.go index 6e1a7f42b0a4..98990993a270 100644 --- a/pkg/kv/db.go +++ b/pkg/kv/db.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. 
-package client +package kv import ( "context" diff --git a/pkg/kv/db_test.go b/pkg/kv/db_test.go index 03a60d69148e..362fa8aa5559 100644 --- a/pkg/kv/db_test.go +++ b/pkg/kv/db_test.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package client_test +package kv_test import ( "bytes" @@ -16,7 +16,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -27,7 +27,7 @@ func strToValue(s string) *roachpb.Value { return &v } -func setup(t *testing.T) (serverutils.TestServerInterface, *client.DB) { +func setup(t *testing.T) (serverutils.TestServerInterface, *kv.DB) { s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) return s, kvDB } @@ -44,7 +44,7 @@ func checkResult(t *testing.T, expected, result []byte) { } } -func checkResults(t *testing.T, expected map[string][]byte, results []client.Result) { +func checkResults(t *testing.T, expected map[string][]byte, results []kv.Result) { count := 0 for _, result := range results { checkRows(t, expected, result.Rows) @@ -53,7 +53,7 @@ func checkResults(t *testing.T, expected map[string][]byte, results []client.Res checkLen(t, len(expected), count) } -func checkRows(t *testing.T, expected map[string][]byte, rows []client.KeyValue) { +func checkRows(t *testing.T, expected map[string][]byte, rows []kv.KeyValue) { for i, row := range rows { if !bytes.Equal(expected[string(row.Key)], row.ValueBytes()) { t.Errorf("expected %d: %s=\"%s\", got %s=\"%s\"", @@ -198,7 +198,7 @@ func TestBatch(t *testing.T) { s, db := setup(t) defer s.Stopper().Stop(context.TODO()) - b := &client.Batch{} + b := &kv.Batch{} b.Get("aa") b.Put("bb", "2") if err := db.Run(context.TODO(), b); err != nil { @@ -217,7 +217,7 @@ func TestDB_Scan(t *testing.T) { s, db := setup(t) defer s.Stopper().Stop(context.TODO()) - b := &client.Batch{} + b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("bb", "3") @@ -242,7 +242,7 @@ func TestDB_ScanForUpdate(t *testing.T) { s, db := setup(t) defer s.Stopper().Stop(context.TODO()) - b := &client.Batch{} + b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("bb", "3") @@ -267,7 +267,7 @@ func TestDB_ReverseScan(t *testing.T) { s, db := setup(t) defer s.Stopper().Stop(context.TODO()) - b := &client.Batch{} + b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("bb", "3") @@ -292,7 +292,7 @@ func TestDB_ReverseScanForUpdate(t *testing.T) { s, db := setup(t) defer s.Stopper().Stop(context.TODO()) - b := &client.Batch{} + b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("bb", "3") @@ -317,7 +317,7 @@ func TestDB_TxnIterate(t *testing.T) { s, db := setup(t) defer s.Stopper().Stop(context.TODO()) - b := &client.Batch{} + b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("bb", "3") @@ -329,14 +329,14 @@ func TestDB_TxnIterate(t *testing.T) { {1, 2}, {2, 1}, } - var rows []client.KeyValue = nil + var rows []kv.KeyValue = nil var p int for _, c := range tc { - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { p = 0 - rows = make([]client.KeyValue, 0) + rows = make([]kv.KeyValue, 0) return txn.Iterate(context.TODO(), "a", "b", c.pageSize, - func(rs []client.KeyValue) error { + 
func(rs []client.KeyValue) error { + func(rs []kv.KeyValue) error { p++ rows = append(rows, rs...) return nil @@ -362,7 +362,7 @@ func TestDB_Del(t *testing.T) { s, db := setup(t) defer s.Stopper().Stop(context.TODO()) - b := &client.Batch{} + b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("ac", "3") @@ -389,7 +389,7 @@ func TestTxn_Commit(t *testing.T) { s, db := setup(t) defer s.Stopper().Stop(context.TODO()) - err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Put("aa", "1") b.Put("ab", "2") @@ -399,7 +399,7 @@ func TestTxn_Commit(t *testing.T) { t.Fatal(err) } - b := &client.Batch{} + b := &kv.Batch{} b.Get("aa") b.Get("ab") if err := db.Run(context.TODO(), b); err != nil { diff --git a/pkg/kv/doc.go b/pkg/kv/doc.go index e813f62b19e3..5c2f46814d61 100644 --- a/pkg/kv/doc.go +++ b/pkg/kv/doc.go @@ -120,4 +120,4 @@ allows writes to the same range to be batched together. In cases where the entire transaction affects only a single range, transactions can commit in a single round trip. */ -package client +package kv diff --git a/pkg/kv/kvclient/kvcoord/dist_sender.go b/pkg/kv/kvclient/kvcoord/dist_sender.go index f0f5c7002b66..36954d98675e 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender.go @@ -19,8 +19,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" @@ -224,7 +224,7 @@ type DistSender struct { disableParallelBatches bool } -var _ client.Sender = &DistSender{} +var _ kv.Sender = &DistSender{} // DistSenderConfig holds configuration and auxiliary objects that can be passed // to NewDistSender. @@ -380,7 +380,7 @@ func (ds *DistSender) RangeLookup( // RangeDescriptor is not on the first range we send the lookup too, we'll // still find it when we scan to the next range. This addresses the issue // described in #18032 and #16266, allowing us to support meta2 splits. - return client.RangeLookup(ctx, ds, key.AsRawKey(), rc, rangeLookupPrefetchCount, useReverseScan) + return kv.RangeLookup(ctx, ds, key.AsRawKey(), rc, rangeLookupPrefetchCount, useReverseScan) } // FirstRange implements the RangeDescriptorDB interface.
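Illustrative sketch (not part of the patch): after this rename, caller code imports pkg/kv and keeps the same API shapes shown in the hunks above (kv.DB, kv.Txn, kv.Batch). The package and helper names below (kvusage, putThenScan) are made up for the example.

package kvusage // hypothetical package name for this sketch

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
)

// putThenScan writes two keys inside a kv.Txn and reads them back with a
// non-transactional kv.Batch, mirroring the call sites renamed in this diff.
func putThenScan(ctx context.Context, db *kv.DB) ([]kv.KeyValue, error) {
	// The closure may be retried, so it only performs idempotent writes.
	if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		if err := txn.Put(ctx, "a", "1"); err != nil {
			return err
		}
		return txn.Put(ctx, "b", "2")
	}); err != nil {
		return nil, err
	}
	b := &kv.Batch{}
	b.Scan("a", "c")
	if err := db.Run(ctx, b); err != nil {
		return nil, err
	}
	return b.Results[0].Rows, nil
}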
diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go index 0c86c2604e13..8ee54d04808b 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go @@ -23,8 +23,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" @@ -52,7 +52,7 @@ func strToValue(s string) *roachpb.Value { return &v } -func startNoSplitMergeServer(t *testing.T) (serverutils.TestServerInterface, *client.DB) { +func startNoSplitMergeServer(t *testing.T) (serverutils.TestServerInterface, *kv.DB) { s, _, db := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ @@ -105,7 +105,7 @@ func TestRangeLookupWithOpenTransaction(t *testing.T) { }, ds, ) - db := client.NewDB(ambient, tsf, s.Clock()) + db := kv.NewDB(ambient, tsf, s.Clock()) // Now, with an intent pending, attempt (asynchronously) to read // from an arbitrary key. This will cause the distributed sender to @@ -122,7 +122,7 @@ func TestRangeLookupWithOpenTransaction(t *testing.T) { // setupMultipleRanges creates a database client to the supplied test // server and splits the key range at the given keys. Returns the DB // client. -func setupMultipleRanges(ctx context.Context, db *client.DB, splitAt ...string) error { +func setupMultipleRanges(ctx context.Context, db *kv.DB, splitAt ...string) error { // Split the keyspace at the given keys. 
for _, key := range splitAt { if err := db.AdminSplit(ctx, key /* spanKey */, key /* splitKey */, hlc.MaxTimestamp /* expirationTime */); err != nil { @@ -162,7 +162,7 @@ type checkOptions struct { func checkSpanResults( t *testing.T, spans [][]string, - results []client.Result, + results []kv.Result, expResults [][]string, expSatisfied map[int]struct{}, opt checkOptions, @@ -197,7 +197,7 @@ func checkSpanResults( func checkResumeSpanScanResults( t *testing.T, spans [][]string, - results []client.Result, + results []kv.Result, expResults [][]string, expSatisfied map[int]struct{}, opt checkOptions, @@ -251,7 +251,7 @@ func checkResumeSpanScanResults( func checkResumeSpanReverseScanResults( t *testing.T, spans [][]string, - results []client.Result, + results []kv.Result, expResults [][]string, expSatisfied map[int]struct{}, opt checkOptions, @@ -299,7 +299,7 @@ func checkResumeSpanReverseScanResults( func checkScanResults( t *testing.T, spans [][]string, - results []client.Result, + results []kv.Result, expResults [][]string, expSatisfied map[int]struct{}, opt checkOptions, @@ -312,7 +312,7 @@ func checkScanResults( func checkReverseScanResults( t *testing.T, spans [][]string, - results []client.Result, + results []kv.Result, expResults [][]string, expSatisfied map[int]struct{}, opt checkOptions, @@ -428,7 +428,7 @@ func TestMultiRangeBoundedBatchScan(t *testing.T) { for _, reverse := range []bool{false, true} { for bound := 1; bound <= maxBound; bound++ { t.Run(fmt.Sprintf("reverse=%t,bound=%d", reverse, bound), func(t *testing.T) { - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = int64(bound) for _, span := range scans { @@ -471,7 +471,7 @@ func TestMultiRangeBoundedBatchScan(t *testing.T) { // Re-query using the resume spans that were returned; check that all // spans are read properly. if bound < maxExpCount { - newB := &client.Batch{} + newB := &kv.Batch{} for _, res := range b.Results { if res.ResumeSpan != nil { if !reverse { @@ -532,7 +532,7 @@ func TestMultiRangeBoundedBatchScanUnsortedOrder(t *testing.T) { } bound := 6 - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = int64(bound) // Two non-overlapping requests out of order. spans := [][]string{{"b3", "c2"}, {"a", "b3"}} @@ -571,7 +571,7 @@ func TestMultiRangeBoundedBatchScanSortedOverlapping(t *testing.T) { } bound := 6 - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = int64(bound) // Two ordered overlapping requests. spans := [][]string{{"a", "d"}, {"b", "g"}} @@ -591,7 +591,7 @@ func TestMultiRangeBoundedBatchScanSortedOverlapping(t *testing.T) { // check ResumeSpan in the DelRange results. func checkResumeSpanDelRangeResults( - t *testing.T, spans [][]string, results []client.Result, expResults [][]string, expCount int, + t *testing.T, spans [][]string, results []kv.Result, expResults [][]string, expCount int, ) { for i, res := range results { // Check ResumeSpan when request has been processed. 
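Illustrative sketch (not part of the patch): the bounded-batch tests in this file cap a scan with MaxSpanRequestKeys and then continue from ResumeSpan, as exercised in the hunks around this point. A minimal version of that paging loop with the renamed kv.Batch, assuming a *kv.DB is at hand; kvusage and scanAllBounded are made-up names.

package kvusage // hypothetical package name for this sketch

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// scanAllBounded pages through [start, end) with MaxSpanRequestKeys, resuming
// from ResumeSpan until the span is exhausted.
func scanAllBounded(ctx context.Context, db *kv.DB, start, end string, pageSize int64) ([]kv.KeyValue, error) {
	var rows []kv.KeyValue
	startKey, endKey := roachpb.Key(start), roachpb.Key(end)
	for {
		b := &kv.Batch{}
		b.Header.MaxSpanRequestKeys = pageSize // cap on keys returned by this batch
		b.Scan(startKey, endKey)
		if err := db.Run(ctx, b); err != nil {
			return nil, err
		}
		res := b.Results[0]
		rows = append(rows, res.Rows...)
		if res.ResumeSpan == nil {
			return rows, nil // nothing left to scan
		}
		// Continue from where the bounded scan stopped.
		startKey, endKey = res.ResumeSpan.Key, res.ResumeSpan.EndKey
	}
}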
@@ -652,7 +652,7 @@ func TestMultiRangeBoundedBatchDelRange(t *testing.T) { } } - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = int64(bound) spans := [][]string{{"a", "c"}, {"c", "f"}, {"g", "h"}} for _, span := range spans { @@ -707,7 +707,7 @@ func TestMultiRangeBoundedBatchDelRangeBoundary(t *testing.T) { } } - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = 3 b.DelRange("a", "c", true /* returnKeys */) if err := db.Run(ctx, b); err != nil { @@ -720,7 +720,7 @@ func TestMultiRangeBoundedBatchDelRangeBoundary(t *testing.T) { t.Fatalf("received ResumeSpan %+v", b.Results[0].ResumeSpan) } - b = &client.Batch{} + b = &kv.Batch{} b.Header.MaxSpanRequestKeys = 1 b.DelRange("b", "c", true /* returnKeys */) if err := db.Run(ctx, b); err != nil { @@ -765,7 +765,7 @@ func TestMultiRangeBoundedBatchDelRangeOverlappingKeys(t *testing.T) { } } - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = int64(bound) spans := [][]string{{"a", "b3"}, {"b", "d"}, {"c", "f2a"}, {"f1a", "g"}} for _, span := range spans { @@ -814,7 +814,7 @@ func TestMultiRangeEmptyAfterTruncate(t *testing.T) { // Delete the keys within a transaction. The range [c,d) doesn't have // any active requests. - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.DelRange("a", "b", false /* returnKeys */) b.DelRange("e", "f", false /* returnKeys */) @@ -834,7 +834,7 @@ func TestMultiRequestBatchWithFwdAndReverseRequests(t *testing.T) { if err := setupMultipleRanges(ctx, db, "a", "b"); err != nil { t.Fatal(err) } - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = 100 b.Scan("a", "b") b.ReverseScan("a", "b") @@ -879,7 +879,7 @@ func TestMultiRangeScanReverseScanDeleteResolve(t *testing.T) { // Delete the keys within a transaction. Implicitly, the intents are // resolved via ResolveIntentRange upon completion. - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.DelRange("a", "d", false /* returnKeys */) return txn.CommitInBatch(ctx, b) @@ -926,7 +926,7 @@ func TestMultiRangeScanReverseScanInconsistent(t *testing.T) { keys := [2]string{"a", "b"} ts := [2]hlc.Timestamp{} for i, key := range keys { - b := &client.Batch{} + b := &kv.Batch{} b.Put(key, "value") if err := db.Run(ctx, b); err != nil { t.Fatal(err) @@ -968,7 +968,7 @@ func TestMultiRangeScanReverseScanInconsistent(t *testing.T) { s.(*server.TestServer).Gossip(), ) - reply, err := client.SendWrappedWith(context.Background(), ds, roachpb.Header{ + reply, err := kv.SendWrappedWith(context.Background(), ds, roachpb.Header{ ReadConsistency: rc, }, request) if err != nil { @@ -1019,7 +1019,7 @@ func TestParallelSender(t *testing.T) { psCount := getPSCount() // Batch writes to each range. 
- if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() for _, key := range splitKeys { b.Put(key, "val") @@ -1046,7 +1046,7 @@ func TestParallelSender(t *testing.T) { } } -func initReverseScanTestEnv(s serverutils.TestServerInterface, t *testing.T) *client.DB { +func initReverseScanTestEnv(s serverutils.TestServerInterface, t *testing.T) *kv.DB { db := s.DB() // Set up multiple ranges: @@ -1180,7 +1180,7 @@ func TestBatchPutWithConcurrentSplit(t *testing.T) { SplitKey: roachpb.Key(key), ExpirationTime: hlc.MaxTimestamp, } - if _, err := client.SendWrapped(context.Background(), ds, req); err != nil { + if _, err := kv.SendWrapped(context.Background(), ds, req); err != nil { t.Fatal(err) } } @@ -1188,7 +1188,7 @@ func TestBatchPutWithConcurrentSplit(t *testing.T) { // Execute a batch on the default sender. Since its cache will not // have been updated to reflect the new splits, it will discover // them partway through and need to reinvoke divideAndSendBatchToRanges. - b := &client.Batch{} + b := &kv.Batch{} for i, key := range []string{"a1", "b1", "c1", "d1", "f1"} { b.Put(key, fmt.Sprintf("value-%d", i)) } @@ -1340,7 +1340,7 @@ func TestPropagateTxnOnError(t *testing.T) { // requests: Put, CPut, and Put. The CPut operation will get a // ReadWithinUncertaintyIntervalError and the txn will be retried. epoch := 0 - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Observe the commit timestamp to prevent refreshes. _ = txn.CommitTimestamp() @@ -1419,7 +1419,7 @@ func TestTxnStarvation(t *testing.T) { }() epoch := 0 - if err := s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { epoch++ <-haveWritten time.Sleep(1 * time.Millisecond) @@ -1475,7 +1475,7 @@ func TestAsyncAbortPoisons(t *testing.T) { db := s.DB() // Write values to key "a". - txn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) b := txn.NewBatch() b.Put(keyA, []byte("value")) if err := txn.Run(ctx, b); err != nil { @@ -1483,7 +1483,7 @@ func TestAsyncAbortPoisons(t *testing.T) { } // Run a high-priority txn that will abort the previous one. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetUserPriority(roachpb.MaxUserPriority); err != nil { return err } @@ -1555,9 +1555,9 @@ func TestTxnCoordSenderRetries(t *testing.T) { testCases := []struct { name string - beforeTxnStart func(context.Context, *client.DB) error // called before the txn starts - afterTxnStart func(context.Context, *client.DB) error // called after the txn chooses a timestamp - retryable func(context.Context, *client.Txn) error // called during the txn; may be retried + beforeTxnStart func(context.Context, *kv.DB) error // called before the txn starts + afterTxnStart func(context.Context, *kv.DB) error // called after the txn chooses a timestamp + retryable func(context.Context, *kv.Txn) error // called during the txn; may be retried filter func(storagebase.FilterArgs) *roachpb.Error tsLeaked bool // If both of these are false, no retries. 
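Illustrative sketch (not part of the patch): the retryable closures in the test table that follows may be re-invoked by db.Txn when the transaction coordinator cannot retry transparently, which is why these tests count epochs. A minimal attempt-counting version of that pattern; kvusage and runWithRetryCount are made-up names.

package kvusage // hypothetical package name for this sketch

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
)

// runWithRetryCount reports how many times db.Txn invoked its closure; the
// closure is re-run on retryable errors, so side effects outside it (like this
// counter) must tolerate re-execution.
func runWithRetryCount(ctx context.Context, db *kv.DB) (attempts int, err error) {
	err = db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		attempts++
		if _, err := txn.Get(ctx, "a"); err != nil {
			return err
		}
		return txn.Put(ctx, "a", "updated")
	})
	return attempts, err
}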
@@ -1567,21 +1567,21 @@ func TestTxnCoordSenderRetries(t *testing.T) { }{ { name: "forwarded timestamp with get and put", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // read key to set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.Put(ctx, "a", "put") // put to advance txn ts }, }, { name: "forwarded timestamp with get and put timestamp leaked", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // read key to set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.Put(ctx, "a", "put") // put to advance txn ts }, tsLeaked: true, @@ -1589,34 +1589,34 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "forwarded timestamp with get and initput", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // read key to set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.InitPut(ctx, "a", "put", false /* failOnTombstones */) // put to advance txn ts }, }, { name: "forwarded timestamp with get and cput", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // read key to set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.CPut(ctx, "a", "cput", strToValue("put")) // cput to advance txn ts, set update span }, }, { name: "forwarded timestamp with get and cput timestamp leaked", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // read key to set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.CPut(ctx, "a", "cput", strToValue("put")) // cput to advance txn ts, set update span }, tsLeaked: true, @@ -1624,21 +1624,21 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "forwarded timestamp with scan and cput", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Scan(ctx, "a", "az", 0) // scan sets ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.CPut(ctx, "ab", "cput", nil) // cput advances, sets update span }, }, { name: "forwarded timestamp with delete range", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // read key to set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { 
return txn.DelRange(ctx, "a", "b") }, // Expect a transaction coord retry, which should succeed. @@ -1646,11 +1646,11 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "forwarded timestamp with put in batch commit", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Put("a", "put") return txn.CommitInBatch(ctx, b) @@ -1659,14 +1659,14 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "forwarded timestamp with cput in batch commit", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "orig") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.CPut("a", "cput", strToValue("orig")) return txn.CommitInBatch(ctx, b) @@ -1677,10 +1677,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { // If we've exhausted the limit for tracking refresh spans but we // already refreshed, keep running the txn. name: "forwarded timestamp with too many refreshes, read only", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { // Make the batch large enough such that when we accounted for // all of its spans then we exceed the limit on refresh spans. // This is not an issue because we refresh before tracking their @@ -1707,11 +1707,11 @@ func TestTxnCoordSenderRetries(t *testing.T) { // has been pushed, if we successfully commit then we won't hit an // error. name: "forwarded timestamp with too many refreshes in batch commit", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { // Advance timestamp. if err := txn.Put(ctx, "a", "put"); err != nil { return err @@ -1740,11 +1740,11 @@ func TestTxnCoordSenderRetries(t *testing.T) { // error. This is the case even if the final batch itself causes a // refresh. name: "forwarded timestamp with too many refreshes in batch commit triggering refresh", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { // Advance timestamp. This also creates a refresh span which // will prevent the txn from committing without a refresh. 
if err := txn.DelRange(ctx, "a", "b"); err != nil { @@ -1769,20 +1769,20 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with put", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.Put(ctx, "a", "put") }, txnCoordRetry: true, }, { name: "write too old with put timestamp leaked", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.Put(ctx, "a", "put") }, tsLeaked: true, @@ -1790,10 +1790,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with get in the clear", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { if _, err := txn.Get(ctx, "b"); err != nil { return err } @@ -1803,10 +1803,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with get conflict", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { if _, err := txn.Get(ctx, "a"); err != nil { return err } @@ -1816,10 +1816,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with multiple puts to same key", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value1") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { // Get so we must refresh when txn timestamp moves forward. 
if _, err := txn.Get(ctx, "a"); err != nil { return err @@ -1844,13 +1844,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with cput matching newer value", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.CPut(ctx, "a", "cput", strToValue("put")) }, txnCoordRetry: false, // fails on first attempt at cput @@ -1858,13 +1858,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with cput matching older value", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.CPut(ctx, "a", "cput", strToValue("value")) }, txnCoordRetry: false, // non-matching value means we fail txn coord retry @@ -1872,28 +1872,28 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with cput matching older and newer values", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.CPut(ctx, "a", "cput", strToValue("value")) }, txnCoordRetry: true, }, { name: "write too old with increment", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Inc(ctx, "inc", 1) return err }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Inc(ctx, "inc", 1) return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { val, err := txn.Inc(ctx, "inc", 1) if err != nil { return err @@ -1907,10 +1907,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with initput", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "iput", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.InitPut(ctx, "iput", "put", false) }, txnCoordRetry: true, // fails on first attempt at cput with write too old @@ -1918,13 +1918,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with initput matching older and newer values", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "iput", "put") }, - 
afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "iput", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.InitPut(ctx, "iput", "put", false) }, // Expect a transaction coord retry, which should succeed. @@ -1932,13 +1932,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with initput matching older value", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "iput", "put1") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "iput", "put2") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.InitPut(ctx, "iput", "put1", false) }, txnCoordRetry: false, // non-matching value means we fail txn coord retry @@ -1946,13 +1946,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with initput matching newer value", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "iput", "put1") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "iput", "put2") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.InitPut(ctx, "iput", "put2", false) }, // No txn coord retry as we get condition failed error. @@ -1960,26 +1960,26 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with initput failing on tombstone before", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Del(ctx, "iput") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "iput", "put2") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.InitPut(ctx, "iput", "put2", true) }, expFailure: "unexpected value", // condition failed error when failing on tombstones }, { name: "write too old with initput failing on tombstone after", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "iput", "put") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Del(ctx, "iput") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.InitPut(ctx, "iput", "put", true) }, txnCoordRetry: false, // non-matching value means we fail txn coord retry @@ -1990,10 +1990,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { // The Put gets a write too old error but, since there's no refresh spans, // the commit succeeds. 
name: "write too old with put in batch commit", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Put("a", "new-put") return txn.CommitInBatch(ctx, b) // will be a 1PC, won't get auto retry @@ -2008,13 +2008,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { // successful since there are no refresh spans (the request will succeed // after a server-side refresh). name: "write too old in staging commit", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "orig") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, "another", "another put"); err != nil { return err } @@ -2027,13 +2027,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "write too old with cput in batch commit", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "orig") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.CPut("a", "cput", strToValue("put")) return txn.CommitInBatch(ctx, b) // will be a 1PC, won't get auto retry @@ -2048,13 +2048,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { // This test is like the previous one, except the 1PC batch cannot commit // at the updated timestamp. 
name: "write too old with failed cput in batch commit", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "orig") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "put") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.CPut("a", "cput", strToValue("orig")) return txn.CommitInBatch(ctx, b) // will be a 1PC, won't get auto retry @@ -2063,11 +2063,11 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range batch with forwarded timestamp", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "c") // set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Put("a", "put") b.Put("c", "put") @@ -2077,14 +2077,14 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range batch with forwarded timestamp and cput", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.CPut("a", "cput", strToValue("value")) b.Put("c", "put") @@ -2093,14 +2093,14 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range batch with forwarded timestamp and cput and get", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { if _, err := txn.Get(ctx, "b"); err != nil { // Get triggers retry return err } @@ -2113,14 +2113,14 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range batch with forwarded timestamp and cput and delete range", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "c", "value") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { _, err := db.Get(ctx, "a") // set ts cache return err }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.DelRange("a", "b", false /* returnKeys */) b.CPut("c", "cput", strToValue("value")) @@ -2130,10 +2130,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range batch with write too old", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "c", "value") }, - 
retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Put("a", "put") b.Put("c", "put") @@ -2143,13 +2143,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range batch with write too old and failed cput", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "orig") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.CPut("a", "cput", strToValue("orig")) b.Put("c", "put") @@ -2160,13 +2160,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range batch with write too old and successful cput", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "orig") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "orig") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.CPut("a", "cput", strToValue("orig")) b.Put("c", "put") @@ -2181,7 +2181,7 @@ func TestTxnCoordSenderRetries(t *testing.T) { // successful refresh, and that previously-successful prefix sub-batches // are not refreshed (but are retried instead). name: "multi-range with scan getting updated results after refresh", - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { // Write to "a". This value will not be seen by the Get the first time // it's evaluated, but it will be seen when it's retried at a bumped // timestamp. In particular, this verifies that the get is not @@ -2195,7 +2195,7 @@ func TestTxnCoordSenderRetries(t *testing.T) { // refresh.
return db.Put(ctx, "b", "newval2") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Get("a") b.Put("b", "put2") @@ -2215,10 +2215,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "cput within uncertainty interval", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.CPut(ctx, "a", "cput", strToValue("value")) }, filter: newUncertaintyFilter(roachpb.Key([]byte("a"))), @@ -2226,10 +2226,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "cput within uncertainty interval timestamp leaked", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.CPut(ctx, "a", "cput", strToValue("value")) }, filter: newUncertaintyFilter(roachpb.Key([]byte("a"))), @@ -2238,10 +2238,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "reads within uncertainty interval", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { if _, err := txn.Get(ctx, "aa"); err != nil { return err } @@ -2258,13 +2258,13 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "reads within uncertainty interval and violating concurrent put", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "value") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "ab", "value") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { if _, err := txn.Get(ctx, "aa"); err != nil { return err } @@ -2281,10 +2281,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range batch with uncertainty interval error", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "c", "value") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, "a", "put"); err != nil { return err } @@ -2297,16 +2297,16 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range batch with uncertainty interval error and get conflict", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "a", "init") }, - afterTxnStart: func(ctx context.Context, db *client.DB) error { + afterTxnStart: func(ctx context.Context, db *kv.DB) error { if err := db.Put(ctx, "b", "value"); err != nil { return err } return db.Put(ctx, "a", "value") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) 
error { if _, err := txn.Get(ctx, "b"); err != nil { return err } @@ -2319,10 +2319,10 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range batch with uncertainty interval error and mixed success", - beforeTxnStart: func(ctx context.Context, db *client.DB) error { + beforeTxnStart: func(ctx context.Context, db *kv.DB) error { return db.Put(ctx, "c", "value") }, - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Put("a", "put") b.CPut("c", "cput", strToValue("value")) @@ -2334,7 +2334,7 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range scan with uncertainty interval error", - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { _, err := txn.Scan(ctx, "a", "d", 0) return err }, @@ -2344,7 +2344,7 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "multi-range delete range with uncertainty interval error", - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { return txn.DelRange(ctx, "a", "d") }, filter: newUncertaintyFilter(roachpb.Key([]byte("c"))), @@ -2353,7 +2353,7 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "missing pipelined write caught on chain", - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, "a", "put"); err != nil { return err } @@ -2380,7 +2380,7 @@ func TestTxnCoordSenderRetries(t *testing.T) { }, { name: "missing pipelined write caught on commit", - retryable: func(ctx context.Context, txn *client.Txn) error { + retryable: func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, "a", "put"); err != nil { return err } @@ -2422,7 +2422,7 @@ func TestTxnCoordSenderRetries(t *testing.T) { var lastAutoRetries int64 var hadClientRetry bool epoch := 0 - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if tc.tsLeaked { // Read the commit timestamp so the expectation is that // this transaction cannot be restarted internally. diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_test.go index 79cccc240c50..e3dc48f657d9 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_test.go @@ -27,8 +27,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/gossip/simulation" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" @@ -403,7 +403,7 @@ func TestSendRPCOrder(t *testing.T) { } // Kill the cached NodeDescriptor, enforcing a lookup from Gossip. ds.nodeDescriptor = nil - if _, err := client.SendWrappedWith(context.Background(), ds, roachpb.Header{ + if _, err := kv.SendWrappedWith(context.Background(), ds, roachpb.Header{ RangeID: rangeID, // Not used in this test, but why not. 
ReadConsistency: consistency, }, args); err != nil { @@ -538,7 +538,7 @@ func TestImmutableBatchArgs(t *testing.T) { txn.UpdateObservedTimestamp(1, hlc.MaxTimestamp) put := roachpb.NewPut(roachpb.Key("don't"), roachpb.Value{}) - if _, pErr := client.SendWrappedWith(context.Background(), ds, roachpb.Header{ + if _, pErr := kv.SendWrappedWith(context.Background(), ds, roachpb.Header{ Txn: &txn, }, put); pErr != nil { t.Fatal(pErr) @@ -599,7 +599,7 @@ func TestRetryOnNotLeaseHolderError(t *testing.T) { ds := NewDistSender(cfg, g) v := roachpb.MakeValueFromString("value") put := roachpb.NewPut(roachpb.Key("a"), v) - if _, pErr := client.SendWrapped(context.Background(), ds, put); !testutils.IsPError(pErr, "boom") { + if _, pErr := kv.SendWrapped(context.Background(), ds, put); !testutils.IsPError(pErr, "boom") { t.Fatalf("unexpected error: %v", pErr) } if first { @@ -689,7 +689,7 @@ func TestBackoffOnNotLeaseHolderErrorDuringTransfer(t *testing.T) { ds := NewDistSender(cfg, g) v := roachpb.MakeValueFromString("value") put := roachpb.NewPut(roachpb.Key("a"), v) - if _, pErr := client.SendWrapped(context.Background(), ds, put); !testutils.IsPError(pErr, "boom") { + if _, pErr := kv.SendWrapped(context.Background(), ds, put); !testutils.IsPError(pErr, "boom") { t.Fatalf("%d: unexpected error: %v", i, pErr) } if got := ds.Metrics().InLeaseTransferBackoffs.Count(); got != c.expected { @@ -829,7 +829,7 @@ func TestRetryOnDescriptorLookupError(t *testing.T) { ds := NewDistSender(cfg, g) put := roachpb.NewPut(roachpb.Key("a"), roachpb.MakeValueFromString("value")) // Error on descriptor lookup, second attempt successful. - if _, pErr := client.SendWrapped(context.Background(), ds, put); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), ds, put); pErr != nil { t.Errorf("unexpected error: %s", pErr) } if len(errs) != 0 { @@ -885,7 +885,7 @@ func TestEvictOnFirstRangeGossip(t *testing.T) { TestingKnobs: ClientTestingKnobs{ TransportFactory: SenderTransportFactory( tracing.NewTracer(), - client.SenderFunc(sender), + kv.SenderFunc(sender), ), }, RangeDescriptorDB: rDB, @@ -1018,7 +1018,7 @@ func TestEvictCacheOnError(t *testing.T) { key := roachpb.Key("a") put := roachpb.NewPut(key, roachpb.MakeValueFromString("value")) - if _, pErr := client.SendWrapped(ctx, ds, put); pErr != nil && !testutils.IsPError(pErr, errString) && !testutils.IsError(pErr.GoError(), ctx.Err().Error()) { + if _, pErr := kv.SendWrapped(ctx, ds, put); pErr != nil && !testutils.IsPError(pErr, errString) && !testutils.IsError(pErr.GoError(), ctx.Err().Error()) { t.Errorf("put encountered unexpected error: %s", pErr) } if _, ok := ds.leaseHolderCache.Lookup(context.TODO(), 1); ok != !tc.shouldClearLeaseHolder { @@ -1086,7 +1086,7 @@ func TestEvictCacheOnUnknownLeaseHolder(t *testing.T) { key := roachpb.Key("a") put := roachpb.NewPut(key, roachpb.MakeValueFromString("value")) - if _, pErr := client.SendWrapped(context.Background(), ds, put); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), ds, put); pErr != nil { t.Errorf("put encountered unexpected error: %s", pErr) } if count != 3 { @@ -1126,7 +1126,7 @@ func TestRetryOnWrongReplicaError(t *testing.T) { if err != nil { t.Fatal(err) } - if client.TestingIsRangeLookup(ba) { + if kv.TestingIsRangeLookup(ba) { if bytes.HasPrefix(rs.Key, keys.Meta1Prefix) { br := &roachpb.BatchResponse{} r := &roachpb.ScanResponse{} @@ -1185,7 +1185,7 @@ func TestRetryOnWrongReplicaError(t *testing.T) { } ds := NewDistSender(cfg, g) scan := roachpb.NewScan(roachpb.Key("a"), 
roachpb.Key("d"), false) - if _, err := client.SendWrapped(context.Background(), ds, scan); err != nil { + if _, err := kv.SendWrapped(context.Background(), ds, scan); err != nil { t.Errorf("scan encountered error: %s", err) } } @@ -1224,7 +1224,7 @@ func TestRetryOnWrongReplicaErrorWithSuggestion(t *testing.T) { if err != nil { t.Fatal(err) } - if client.TestingIsRangeLookup(ba) { + if kv.TestingIsRangeLookup(ba) { if bytes.HasPrefix(rs.Key, keys.Meta1Prefix) { br := &roachpb.BatchResponse{} r := &roachpb.ScanResponse{} @@ -1281,7 +1281,7 @@ func TestRetryOnWrongReplicaErrorWithSuggestion(t *testing.T) { } ds := NewDistSender(cfg, g) scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), false) - if _, err := client.SendWrapped(context.Background(), ds, scan); err != nil { + if _, err := kv.SendWrapped(context.Background(), ds, scan); err != nil { t.Errorf("scan encountered error: %s", err) } } @@ -1399,7 +1399,7 @@ func TestSendRPCRetry(t *testing.T) { } ds := NewDistSender(cfg, g) scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), false) - sr, err := client.SendWrappedWith(context.Background(), ds, roachpb.Header{MaxSpanRequestKeys: 1}, scan) + sr, err := kv.SendWrappedWith(context.Background(), ds, roachpb.Header{MaxSpanRequestKeys: 1}, scan) if err != nil { t.Fatal(err) } @@ -1486,7 +1486,7 @@ func TestSendRPCRangeNotFoundError(t *testing.T) { } ds = NewDistSender(cfg, g) get := roachpb.NewGet(roachpb.Key("b")) - _, err := client.SendWrapped(context.Background(), ds, get) + _, err := kv.SendWrapped(context.Background(), ds, get) if err != nil { t.Fatal(err) } @@ -1555,7 +1555,7 @@ func TestMultiRangeGapReverse(t *testing.T) { }) } - sender := client.SenderFunc( + sender := kv.SenderFunc( func(_ context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { rb := args.CreateReply() return rb, nil @@ -1707,7 +1707,7 @@ func TestMultiRangeMergeStaleDescriptor(t *testing.T) { ds := NewDistSender(cfg, g) scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("d"), false) // Set the Txn info to avoid an OpRequiresTxnError. - reply, err := client.SendWrappedWith(context.Background(), ds, roachpb.Header{ + reply, err := kv.SendWrappedWith(context.Background(), ds, roachpb.Header{ MaxSpanRequestKeys: 10, Txn: &roachpb.Transaction{}, }, scan) @@ -1752,7 +1752,7 @@ func TestRangeLookupOptionOnReverseScan(t *testing.T) { rScan := &roachpb.ReverseScanRequest{ RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("a"), EndKey: roachpb.Key("b")}, } - if _, err := client.SendWrapped(context.Background(), ds, rScan); err != nil { + if _, err := kv.SendWrapped(context.Background(), ds, rScan); err != nil { t.Fatal(err) } } @@ -1781,9 +1781,9 @@ func TestClockUpdateOnResponse(t *testing.T) { // Prepare the test function put := roachpb.NewPut(roachpb.Key("a"), roachpb.MakeValueFromString("value")) - doCheck := func(sender client.Sender, fakeTime hlc.Timestamp) { + doCheck := func(sender kv.Sender, fakeTime hlc.Timestamp) { ds.transportFactory = SenderTransportFactory(tracing.NewTracer(), sender) - _, err := client.SendWrapped(context.Background(), ds, put) + _, err := kv.SendWrapped(context.Background(), ds, put) if err != nil && err != expectedErr { t.Fatal(err) } @@ -1795,7 +1795,7 @@ func TestClockUpdateOnResponse(t *testing.T) { // Test timestamp propagation on valid BatchResults. 
fakeTime := ds.clock.Now().Add(10000000000 /*10s*/, 0) - replyNormal := client.SenderFunc( + replyNormal := kv.SenderFunc( func(_ context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { rb := args.CreateReply() rb.Now = fakeTime @@ -1805,7 +1805,7 @@ func TestClockUpdateOnResponse(t *testing.T) { // Test timestamp propagation on errors. fakeTime = ds.clock.Now().Add(10000000000 /*10s*/, 0) - replyError := client.SenderFunc( + replyError := kv.SenderFunc( func(_ context.Context, _ roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { pErr := expectedErr pErr.Now = fakeTime @@ -2594,7 +2594,7 @@ func TestSenderTransport(t *testing.T) { defer leaktest.AfterTest(t)() transport, err := SenderTransportFactory( tracing.NewTracer(), - client.SenderFunc( + kv.SenderFunc( func( _ context.Context, _ roachpb.BatchRequest, @@ -3059,7 +3059,7 @@ func TestCanSendToFollower(t *testing.T) { ds.clusterID = &base.ClusterIDContainer{} // set 2 to be the leaseholder ds.LeaseHolderCache().Update(context.TODO(), 2, 2) - if _, pErr := client.SendWrappedWith(context.Background(), ds, c.header, c.msg); !testutils.IsPError(pErr, "boom") { + if _, pErr := kv.SendWrappedWith(context.Background(), ds, c.header, c.msg); !testutils.IsPError(pErr, "boom") { t.Fatalf("%d: unexpected error: %v", i, pErr) } if sentTo.NodeID != c.expectedNode { @@ -3132,7 +3132,7 @@ func TestEvictMetaRange(t *testing.T) { if err != nil { t.Fatal(err) } - if !client.TestingIsRangeLookup(ba) { + if !kv.TestingIsRangeLookup(ba) { return ba.CreateReply(), nil } @@ -3223,7 +3223,7 @@ func TestEvictMetaRange(t *testing.T) { ds := NewDistSender(cfg, g) scan := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("b"), false) - if _, pErr := client.SendWrapped(context.Background(), ds, scan); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), ds, scan); pErr != nil { t.Fatalf("scan encountered error: %s", pErr) } @@ -3239,7 +3239,7 @@ func TestEvictMetaRange(t *testing.T) { isStale = true scan = roachpb.NewScan(roachpb.Key("b"), roachpb.Key("c"), false) - if _, pErr := client.SendWrapped(context.Background(), ds, scan); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), ds, scan); pErr != nil { t.Fatalf("scan encountered error: %s", pErr) } @@ -3397,7 +3397,7 @@ func TestEvictionTokenCoalesce(t *testing.T) { br.Error = roachpb.NewError(err) return br, nil } - if !client.TestingIsRangeLookup(ba) { + if !kv.TestingIsRangeLookup(ba) { // Return a SendError so DistSender retries the first range lookup in the // user key-space for both batches. 
if atomic.AddInt32(&sendErrors, 1) <= 2 { @@ -3470,7 +3470,7 @@ func TestEvictionTokenCoalesce(t *testing.T) { putFn := func(key, value string) { defer batchWaitGroup.Done() put := roachpb.NewPut(roachpb.Key(key), roachpb.MakeValueFromString("c")) - if _, pErr := client.SendWrapped(context.Background(), ds, put); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), ds, put); pErr != nil { t.Errorf("put encountered error: %s", pErr) } } diff --git a/pkg/kv/kvclient/kvcoord/integration_test.go b/pkg/kv/kvclient/kvcoord/integration_test.go index 2fb13964e581..98706a17a8dd 100644 --- a/pkg/kv/kvclient/kvcoord/integration_test.go +++ b/pkg/kv/kvclient/kvcoord/integration_test.go @@ -17,7 +17,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/txnwait" @@ -147,7 +147,7 @@ func TestWaiterOnRejectedCommit(t *testing.T) { // we'll assert that the txn wait queue is told that the transaction // aborted, and we also check that the reader got a nil value. - txn := client.NewTxn(ctx, db, s.NodeID()) + txn := kv.NewTxn(ctx, db, s.NodeID()) keyLeft, keyRight := "a", "c" for _, key := range []string{keyLeft, keyRight} { if err := txn.Put(ctx, key, "val"); err != nil { diff --git a/pkg/kv/kvclient/kvcoord/local_test_cluster_util.go b/pkg/kv/kvclient/kvcoord/local_test_cluster_util.go index 5b5a801cc1ea..d6f4ddebb715 100644 --- a/pkg/kv/kvclient/kvcoord/local_test_cluster_util.go +++ b/pkg/kv/kvclient/kvcoord/local_test_cluster_util.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" @@ -52,10 +52,10 @@ func InitFactoryForLocalTestCluster( tracer opentracing.Tracer, clock *hlc.Clock, latency time.Duration, - stores client.Sender, + stores kv.Sender, stopper *stop.Stopper, gossip *gossip.Gossip, -) client.TxnSenderFactory { +) kv.TxnSenderFactory { return NewTxnCoordSenderFactory( TxnCoordSenderFactoryConfig{ AmbientCtx: log.AmbientContext{Tracer: st.Tracer}, @@ -74,7 +74,7 @@ func NewDistSenderForLocalTestCluster( tracer opentracing.Tracer, clock *hlc.Clock, latency time.Duration, - stores client.Sender, + stores kv.Sender, stopper *stop.Stopper, g *gossip.Gossip, ) *DistSender { diff --git a/pkg/kv/kvclient/kvcoord/split_test.go b/pkg/kv/kvclient/kvcoord/split_test.go index d1330f16a88f..ac78609b20b1 100644 --- a/pkg/kv/kvclient/kvcoord/split_test.go +++ b/pkg/kv/kvclient/kvcoord/split_test.go @@ -20,8 +20,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" @@ -40,7 +40,7 @@ import ( // random values. If not nil, txnChannel is written to non-blockingly // every time a new transaction starts. 
func startTestWriter( - db *client.DB, + db *kv.DB, i int64, valBytes int32, wg *sync.WaitGroup, @@ -62,7 +62,7 @@ func startTestWriter( return default: first := true - err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if first && txnChannel != nil { select { case txnChannel <- struct{}{}: @@ -236,7 +236,7 @@ func TestRangeSplitsWithWritePressure(t *testing.T) { // on the same splitKey succeeds. func TestRangeSplitsWithSameKeyTwice(t *testing.T) { defer leaktest.AfterTest(t)() - s := createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), &kvserver.StoreTestingKnobs{ + s := createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), &kvserver.StoreTestingKnobs{ DisableScanner: true, DisableSplitQueue: true, DisableMergeQueue: true, @@ -263,7 +263,7 @@ func TestRangeSplitsWithSameKeyTwice(t *testing.T) { // the sticky bit of that range, but no range is split. func TestRangeSplitsStickyBit(t *testing.T) { defer leaktest.AfterTest(t)() - s := createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), &kvserver.StoreTestingKnobs{ + s := createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), &kvserver.StoreTestingKnobs{ DisableScanner: true, DisableSplitQueue: true, DisableMergeQueue: true, diff --git a/pkg/kv/kvclient/kvcoord/transport.go b/pkg/kv/kvclient/kvcoord/transport.go index baf5669e92c3..909ef6b98270 100644 --- a/pkg/kv/kvclient/kvcoord/transport.go +++ b/pkg/kv/kvclient/kvcoord/transport.go @@ -15,7 +15,7 @@ import ( "sort" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/rpc/nodedialer" @@ -303,7 +303,7 @@ func (h byHealth) Less(i, j int) bool { return h[i].healthy && !h[j].healthy } // SenderTransportFactory wraps a client.Sender for use as a KV // Transport. This is useful for tests that want to use DistSender // without a full RPC stack. -func SenderTransportFactory(tracer opentracing.Tracer, sender client.Sender) TransportFactory { +func SenderTransportFactory(tracer opentracing.Tracer, sender kv.Sender) TransportFactory { return func( _ SendOptions, _ *nodedialer.Dialer, replicas ReplicaSlice, ) (Transport, error) { @@ -315,7 +315,7 @@ func SenderTransportFactory(tracer opentracing.Tracer, sender client.Sender) Tra type senderTransport struct { tracer opentracing.Tracer - sender client.Sender + sender kv.Sender replica roachpb.ReplicaDescriptor called bool diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender.go index cb1f948265d6..f213eadd8f35 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender.go @@ -17,8 +17,8 @@ import ( "runtime/debug" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util/duration" @@ -147,10 +147,10 @@ type TxnCoordSender struct { // typ specifies whether this transaction is the top level, // or one of potentially many distributed transactions. 
- typ client.TxnType + typ kv.TxnType } -var _ client.TxnSender = &TxnCoordSender{} +var _ kv.TxnSender = &TxnCoordSender{} // txnInterceptors are pluggable request interceptors that transform requests // and responses and can perform operations in the context of a transaction. A @@ -194,7 +194,7 @@ type txnInterceptor interface { func newRootTxnCoordSender( tcf *TxnCoordSenderFactory, txn *roachpb.Transaction, pri roachpb.UserPriority, -) client.TxnSender { +) kv.TxnSender { txn.AssertInitialized(context.TODO()) if txn.Status != roachpb.PENDING { @@ -205,7 +205,7 @@ func newRootTxnCoordSender( } tcs := &TxnCoordSender{ - typ: client.RootTxn, + typ: kv.RootTxn, TxnCoordSenderFactory: tcf, } tcs.mu.txnState = txnPending @@ -241,7 +241,7 @@ func newRootTxnCoordSender( clock: tcs.clock, txn: &tcs.mu.txn, } - tcs.initCommonInterceptors(tcf, txn, client.RootTxn, riGen) + tcs.initCommonInterceptors(tcf, txn, kv.RootTxn, riGen) // Once the interceptors are initialized, piece them all together in the // correct order. @@ -279,7 +279,7 @@ func newRootTxnCoordSender( } func (tc *TxnCoordSender) initCommonInterceptors( - tcf *TxnCoordSenderFactory, txn *roachpb.Transaction, typ client.TxnType, riGen RangeIteratorGen, + tcf *TxnCoordSenderFactory, txn *roachpb.Transaction, typ kv.TxnType, riGen RangeIteratorGen, ) { tc.interceptorAlloc.txnPipeliner = txnPipeliner{ st: tcf.st, @@ -292,13 +292,13 @@ func (tc *TxnCoordSender) initCommonInterceptors( // because those are the only places where we have all of the // refresh spans. If this is a leaf, as in a distributed sql flow, // we need to propagate the error to the root for an epoch restart. - canAutoRetry: typ == client.RootTxn, + canAutoRetry: typ == kv.RootTxn, autoRetryCounter: tc.metrics.AutoRetries, } tc.interceptorAlloc.txnLockGatekeeper = txnLockGatekeeper{ wrapped: tc.wrapped, mu: &tc.mu.Mutex, - allowConcurrentRequests: typ == client.LeafTxn, + allowConcurrentRequests: typ == kv.LeafTxn, } tc.interceptorAlloc.txnSeqNumAllocator.writeSeq = txn.Sequence } @@ -315,7 +315,7 @@ func (tc *TxnCoordSender) connectInterceptors() { func newLeafTxnCoordSender( tcf *TxnCoordSenderFactory, tis *roachpb.LeafTxnInputState, -) client.TxnSender { +) kv.TxnSender { txn := &tis.Txn // 19.2 roots might have this flag set. In 20.1, the flag is only set by the // server and terminated by the client in the span refresher interceptor. If @@ -334,7 +334,7 @@ func newLeafTxnCoordSender( } tcs := &TxnCoordSender{ - typ: client.LeafTxn, + typ: kv.LeafTxn, TxnCoordSenderFactory: tcf, } tcs.mu.txnState = txnPending @@ -351,7 +351,7 @@ func newLeafTxnCoordSender( if ds, ok := tcf.wrapped.(*DistSender); ok { riGen = ds.rangeIteratorGen } - tcs.initCommonInterceptors(tcf, txn, client.LeafTxn, riGen) + tcs.initCommonInterceptors(tcf, txn, kv.LeafTxn, riGen) // Per-interceptor leaf initialization. If/when more interceptors // need leaf initialization, this should be turned into an interface @@ -603,7 +603,7 @@ func (tc *TxnCoordSender) maybeRejectClientLocked( // See the comment on txnHeartbeater.mu.finalizedStatus for more details. abortedErr := roachpb.NewErrorWithTxn( roachpb.NewTransactionAbortedError(roachpb.ABORT_REASON_CLIENT_REJECT), &tc.mu.txn) - if tc.typ == client.LeafTxn { + if tc.typ == kv.LeafTxn { // Leaf txns return raw retriable errors (which get handled by the // root) rather than TransactionRetryWithProtoRefreshError. 
return abortedErr @@ -752,7 +752,7 @@ func (tc *TxnCoordSender) updateStateLocked( } if pErr.TransactionRestart != roachpb.TransactionRestart_NONE { - if tc.typ == client.LeafTxn { + if tc.typ == kv.LeafTxn { // Leaves handle retriable errors differently than roots. The leaf // transaction is not supposed to be used any more after a retriable // error. Separately, the error needs to make its way back to the root. @@ -963,7 +963,7 @@ func (tc *TxnCoordSender) Active() bool { // GetLeafTxnInputState is part of the client.TxnSender interface. func (tc *TxnCoordSender) GetLeafTxnInputState( - ctx context.Context, opt client.TxnStatusOpt, + ctx context.Context, opt kv.TxnStatusOpt, ) (roachpb.LeafTxnInputState, error) { tc.mu.Lock() defer tc.mu.Unlock() @@ -990,7 +990,7 @@ func (tc *TxnCoordSender) GetLeafTxnInputState( // GetLeafTxnFinalState is part of the client.TxnSender interface. func (tc *TxnCoordSender) GetLeafTxnFinalState( - ctx context.Context, opt client.TxnStatusOpt, + ctx context.Context, opt kv.TxnStatusOpt, ) (roachpb.LeafTxnFinalState, error) { tc.mu.Lock() defer tc.mu.Unlock() @@ -1018,11 +1018,11 @@ func (tc *TxnCoordSender) GetLeafTxnFinalState( return tfs, nil } -func (tc *TxnCoordSender) checkTxnStatusLocked(ctx context.Context, opt client.TxnStatusOpt) error { +func (tc *TxnCoordSender) checkTxnStatusLocked(ctx context.Context, opt kv.TxnStatusOpt) error { switch opt { - case client.AnyTxnStatus: + case kv.AnyTxnStatus: // Nothing to check. - case client.OnlyPending: + case kv.OnlyPending: // Check the coordinator's proto status. rejectErr := tc.maybeRejectClientLocked(ctx, nil /* ba */) if rejectErr != nil { @@ -1093,7 +1093,7 @@ func (tc *TxnCoordSender) PrepareRetryableError(ctx context.Context, msg string) // Step is part of the TxnSender interface. func (tc *TxnCoordSender) Step(ctx context.Context) error { - if tc.typ != client.RootTxn { + if tc.typ != kv.RootTxn { return errors.WithContextTags( errors.AssertionFailedf("cannot call Step() in leaf txn"), ctx) } @@ -1104,9 +1104,9 @@ func (tc *TxnCoordSender) Step(ctx context.Context) error { // ConfigureStepping is part of the TxnSender interface. func (tc *TxnCoordSender) ConfigureStepping( - ctx context.Context, mode client.SteppingMode, -) (prevMode client.SteppingMode) { - if tc.typ != client.RootTxn { + ctx context.Context, mode kv.SteppingMode, +) (prevMode kv.SteppingMode) { + if tc.typ != kv.RootTxn { panic(errors.WithContextTags( errors.AssertionFailedf("cannot call ConfigureStepping() in leaf txn"), ctx)) } @@ -1116,10 +1116,10 @@ func (tc *TxnCoordSender) ConfigureStepping( } // GetSteppingMode is part of the TxnSender interface. 
-func (tc *TxnCoordSender) GetSteppingMode(ctx context.Context) (curMode client.SteppingMode) { - curMode = client.SteppingDisabled +func (tc *TxnCoordSender) GetSteppingMode(ctx context.Context) (curMode kv.SteppingMode) { + curMode = kv.SteppingDisabled if tc.interceptorAlloc.txnSeqNumAllocator.steppingModeEnabled { - curMode = client.SteppingEnabled + curMode = kv.SteppingEnabled } return curMode } diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender_factory.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender_factory.go index 8ad6da5f2f10..721a1646728c 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender_factory.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender_factory.go @@ -14,7 +14,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -28,7 +28,7 @@ type TxnCoordSenderFactory struct { log.AmbientContext st *cluster.Settings - wrapped client.Sender + wrapped kv.Sender clock *hlc.Clock heartbeatInterval time.Duration linearizable bool // enables linearizable behavior @@ -38,7 +38,7 @@ type TxnCoordSenderFactory struct { testingKnobs ClientTestingKnobs } -var _ client.TxnSenderFactory = &TxnCoordSenderFactory{} +var _ kv.TxnSenderFactory = &TxnCoordSenderFactory{} // TxnCoordSenderFactoryConfig holds configuration and auxiliary objects that can be passed // to NewTxnCoordSenderFactory. @@ -59,7 +59,7 @@ type TxnCoordSenderFactoryConfig struct { // NewTxnCoordSenderFactory creates a new TxnCoordSenderFactory. The // factory creates new instances of TxnCoordSenders. func NewTxnCoordSenderFactory( - cfg TxnCoordSenderFactoryConfig, wrapped client.Sender, + cfg TxnCoordSenderFactoryConfig, wrapped kv.Sender, ) *TxnCoordSenderFactory { tcf := &TxnCoordSenderFactory{ AmbientContext: cfg.AmbientCtx, @@ -87,19 +87,19 @@ func NewTxnCoordSenderFactory( // RootTransactionalSender is part of the TxnSenderFactory interface. func (tcf *TxnCoordSenderFactory) RootTransactionalSender( txn *roachpb.Transaction, pri roachpb.UserPriority, -) client.TxnSender { +) kv.TxnSender { return newRootTxnCoordSender(tcf, txn, pri) } // LeafTransactionalSender is part of the TxnSenderFactory interface. func (tcf *TxnCoordSenderFactory) LeafTransactionalSender( tis *roachpb.LeafTxnInputState, -) client.TxnSender { +) kv.TxnSender { return newLeafTxnCoordSender(tcf, tis) } // NonTransactionalSender is part of the TxnSenderFactory interface. 
-func (tcf *TxnCoordSenderFactory) NonTransactionalSender() client.Sender { +func (tcf *TxnCoordSenderFactory) NonTransactionalSender() kv.Sender { return tcf.wrapped } diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender_savepoints.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender_savepoints.go index e364218fa5bf..ef5d0a313bc5 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender_savepoints.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender_savepoints.go @@ -13,7 +13,7 @@ package kvcoord import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented" @@ -46,7 +46,7 @@ type savepoint struct { refreshSpanBytes int64 } -var _ client.SavepointToken = (*savepoint)(nil) +var _ kv.SavepointToken = (*savepoint)(nil) // statically allocated savepoint marking the beginning of a transaction. Used // to avoid allocations for such savepoints. @@ -58,8 +58,8 @@ func (s *savepoint) Initial() bool { } // CreateSavepoint is part of the client.TxnSender interface. -func (tc *TxnCoordSender) CreateSavepoint(ctx context.Context) (client.SavepointToken, error) { - if tc.typ != client.RootTxn { +func (tc *TxnCoordSender) CreateSavepoint(ctx context.Context) (kv.SavepointToken, error) { + if tc.typ != kv.RootTxn { return nil, errors.AssertionFailedf("cannot get savepoint in non-root txn") } @@ -93,8 +93,8 @@ func (tc *TxnCoordSender) CreateSavepoint(ctx context.Context) (client.Savepoint } // RollbackToSavepoint is part of the client.TxnSender interface. -func (tc *TxnCoordSender) RollbackToSavepoint(ctx context.Context, s client.SavepointToken) error { - if tc.typ != client.RootTxn { +func (tc *TxnCoordSender) RollbackToSavepoint(ctx context.Context, s kv.SavepointToken) error { + if tc.typ != kv.RootTxn { return errors.AssertionFailedf("cannot rollback savepoint in non-root txn") } @@ -145,8 +145,8 @@ func (tc *TxnCoordSender) RollbackToSavepoint(ctx context.Context, s client.Save } // ReleaseSavepoint is part of the client.TxnSender interface. -func (tc *TxnCoordSender) ReleaseSavepoint(ctx context.Context, s client.SavepointToken) error { - if tc.typ != client.RootTxn { +func (tc *TxnCoordSender) ReleaseSavepoint(ctx context.Context, s kv.SavepointToken) error { + if tc.typ != kv.RootTxn { return errors.AssertionFailedf("cannot release savepoint in non-root txn") } diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender_savepoints_test.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender_savepoints_test.go index ec1a44e3d8f6..9659c74a1cc9 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender_savepoints_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender_savepoints_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -57,8 +57,8 @@ func TestSavepoints(t *testing.T) { defer s.Stopper().Stop(ctx) // Transient state during the test. 
- sp := make(map[string]client.SavepointToken) - var txn *client.Txn + sp := make(map[string]kv.SavepointToken) + var txn *kv.Txn datadriven.RunTest(t, path, func(t *testing.T, td *datadriven.TestData) string { var buf strings.Builder @@ -77,7 +77,7 @@ func TestSavepoints(t *testing.T) { switch td.Cmd { case "begin": - txn = client.NewTxn(ctx, db, 0) + txn = kv.NewTxn(ctx, db, 0) ptxn() case "commit": diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender_server_test.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender_server_test.go index 2641467d4899..6e788ab9d6f2 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender_server_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender_server_test.go @@ -20,7 +20,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" @@ -72,7 +72,7 @@ func TestHeartbeatFindsOutAboutAbortedTransaction(t *testing.T) { push := func(ctx context.Context, key roachpb.Key) error { // Conflicting transaction that pushes the above transaction. - conflictTxn := client.NewTxn(ctx, origDB, 0 /* gatewayNodeID */) + conflictTxn := kv.NewTxn(ctx, origDB, 0 /* gatewayNodeID */) // We need to explicitly set a high priority for the push to happen. if err := conflictTxn.SetUserPriority(roachpb.MaxUserPriority); err != nil { return err @@ -97,8 +97,8 @@ func TestHeartbeatFindsOutAboutAbortedTransaction(t *testing.T) { }, s.DistSenderI().(*kvcoord.DistSender), ) - db := client.NewDB(ambient, tsf, s.Clock()) - txn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + db := kv.NewDB(ambient, tsf, s.Clock()) + txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) if err := txn.Put(ctx, key, "val"); err != nil { t.Fatal(err) } @@ -159,7 +159,7 @@ func TestNoDuplicateHeartbeatLoops(t *testing.T) { } var attempts int - err := db.Txn(txnCtx, func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(txnCtx, func(ctx context.Context, txn *kv.Txn) error { attempts++ if attempts == 1 { if err := push(context.Background() /* keep the contexts separate */, key); err != nil { diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go index d5013703de6f..7ee7f251bb76 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -47,11 +47,11 @@ func strToValue(s string) *roachpb.Value { // createTestDB creates a local test server and starts it. The caller // is responsible for stopping the test server. 
func createTestDB(t testing.TB) *localtestcluster.LocalTestCluster { - return createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), nil) + return createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), nil) } func createTestDBWithContextAndKnobs( - t testing.TB, dbCtx client.DBContext, knobs *kvserver.StoreTestingKnobs, + t testing.TB, dbCtx kv.DBContext, knobs *kvserver.StoreTestingKnobs, ) *localtestcluster.LocalTestCluster { s := &localtestcluster.LocalTestCluster{ DBContext: &dbCtx, @@ -77,7 +77,7 @@ func TestTxnCoordSenderBeginTransaction(t *testing.T) { defer s.Stop() ctx := context.Background() - txn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) // Put request will create a new transaction. key := roachpb.Key("key") @@ -119,7 +119,7 @@ func TestTxnCoordSenderKeyRanges(t *testing.T) { s := createTestDB(t) defer s.Stop() - txn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) // Disable txn pipelining so that all write spans are immediately // added to the transaction's lock footprint. if err := txn.DisablePipelining(); err != nil { @@ -243,10 +243,10 @@ func TestTxnCoordSenderCondenseLockSpans(t *testing.T) { }, ds, ) - db := client.NewDB(ambient, tsf, s.Clock) + db := kv.NewDB(ambient, tsf, s.Clock) ctx := context.Background() - txn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) // Disable txn pipelining so that all write spans are immediately // added to the transaction's lock footprint. if err := txn.DisablePipelining(); err != nil { @@ -283,7 +283,7 @@ func TestTxnCoordSenderCondenseLockSpans(t *testing.T) { // Test that the heartbeat loop detects aborted transactions and stops. func TestTxnCoordSenderHeartbeat(t *testing.T) { defer leaktest.AfterTest(t)() - s := createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), &kvserver.StoreTestingKnobs{ + s := createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), &kvserver.StoreTestingKnobs{ DisableScanner: true, DisableSplitQueue: true, DisableMergeQueue: true, @@ -314,7 +314,7 @@ func TestTxnCoordSenderHeartbeat(t *testing.T) { ambient.Tracer, s.Clock, s.Latency, s.Stores, s.Stopper, s.Gossip, ), ) - quickHeartbeatDB := client.NewDB(ambient, tsf, s.Clock) + quickHeartbeatDB := kv.NewDB(ambient, tsf, s.Clock) // We're going to test twice. In both cases the heartbeat is supposed to // notice that its transaction is aborted, but: @@ -328,7 +328,7 @@ func TestTxnCoordSenderHeartbeat(t *testing.T) { for _, pusherKey := range []roachpb.Key{keyA, keyC} { t.Run(fmt.Sprintf("pusher:%s", pusherKey), func(t *testing.T) { // Make a db with a short heartbeat interval. - initialTxn := client.NewTxn(ctx, quickHeartbeatDB, 0 /* gatewayNodeID */) + initialTxn := kv.NewTxn(ctx, quickHeartbeatDB, 0 /* gatewayNodeID */) tc := initialTxn.Sender().(*TxnCoordSender) if err := initialTxn.Put(ctx, keyA, []byte("value")); err != nil { @@ -358,7 +358,7 @@ func TestTxnCoordSenderHeartbeat(t *testing.T) { // Push our txn with another high-priority txn. { - if err := s.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetUserPriority(roachpb.MaxUserPriority); err != nil { return err } @@ -386,7 +386,7 @@ func TestTxnCoordSenderHeartbeat(t *testing.T) { } // getTxn fetches the requested key and returns the transaction info.
-func getTxn(ctx context.Context, txn *client.Txn) (*roachpb.Transaction, *roachpb.Error) { +func getTxn(ctx context.Context, txn *kv.Txn) (*roachpb.Transaction, *roachpb.Error) { txnMeta := txn.TestingCloneTxn().TxnMeta qt := &roachpb.QueryTxnRequest{ RequestHeader: roachpb.RequestHeader{ @@ -440,13 +440,13 @@ func TestTxnCoordSenderEndTxn(t *testing.T) { // 4 cases: no deadline, past deadline, equal deadline, future deadline. for i := 0; i < 4; i++ { key := roachpb.Key("key: " + strconv.Itoa(i)) - txn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) // Initialize the transaction. if pErr := txn.Put(ctx, key, []byte("value")); pErr != nil { t.Fatal(pErr) } // Conflicting transaction that pushes the above transaction. - conflictTxn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + conflictTxn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) conflictTxn.TestingSetPriority(enginepb.MaxTxnPriority) if _, pErr := conflictTxn.Get(ctx, key); pErr != nil { t.Fatal(pErr) @@ -526,7 +526,7 @@ func TestTxnCoordSenderAddLockOnError(t *testing.T) { // Create a transaction with intent at "x". key := roachpb.Key("x") - txn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) tc := txn.Sender().(*TxnCoordSender) // Write so that the coordinator begins tracking this txn. @@ -584,13 +584,13 @@ func TestTxnCoordSenderCleanupOnAborted(t *testing.T) { // Create a transaction with intent at "a". key := roachpb.Key("a") - txn1 := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn1 := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) if err := txn1.Put(ctx, key, []byte("value")); err != nil { t.Fatal(err) } // Push the transaction (by writing key "a" with higher priority) to abort it. - txn2 := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn2 := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) if err := txn2.SetUserPriority(roachpb.MaxUserPriority); err != nil { t.Fatal(err) } @@ -621,7 +621,7 @@ func TestTxnCoordSenderCleanupOnCommitAfterRestart(t *testing.T) { // Create a transaction with intent at "a". 
key := roachpb.Key("a") - txn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) if err := txn.Put(ctx, key, []byte("value")); err != nil { t.Fatal(err) } @@ -656,11 +656,11 @@ func TestTxnCoordSenderGCWithAmbiguousResultErr(t *testing.T) { }, } - s := createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), knobs) + s := createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), knobs) defer s.Stop() ctx := context.Background() - txn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) tc := txn.Sender().(*TxnCoordSender) if !errOnFirst { otherKey := roachpb.Key("other") @@ -789,7 +789,7 @@ func TestTxnCoordSenderTxnUpdatedOnError(t *testing.T) { manual := hlc.NewManualClock(origTS.WallTime) clock := hlc.NewClock(manual.UnixNano, 20*time.Nanosecond) - var senderFn client.SenderFunc = func( + var senderFn kv.SenderFunc = func( _ context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { var reply *roachpb.BatchResponse @@ -813,7 +813,7 @@ func TestTxnCoordSenderTxnUpdatedOnError(t *testing.T) { }, senderFn, ) - db := client.NewDB(ambient, tsf, clock) + db := kv.NewDB(ambient, tsf, clock) key := roachpb.Key("test-key") now := clock.Now() origTxnProto := roachpb.MakeTransaction( @@ -832,7 +832,7 @@ func TestTxnCoordSenderTxnUpdatedOnError(t *testing.T) { // for assigning exact priorities doesn't work properly when faced with // updates. origTxnProto.Priority = 1 - txn := client.NewTxnFromProto(ctx, db, 0 /* gatewayNodeID */, now, client.RootTxn, &origTxnProto) + txn := kv.NewTxnFromProto(ctx, db, 0 /* gatewayNodeID */, now, kv.RootTxn, &origTxnProto) txn.TestingSetPriority(1) err := txn.Put(ctx, key, []byte("value")) @@ -878,7 +878,7 @@ func TestTxnMultipleCoord(t *testing.T) { defer s.Stop() ctx := context.Background() - txn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) // Start the transaction. key := roachpb.Key("a") @@ -888,7 +888,7 @@ func TestTxnMultipleCoord(t *testing.T) { // Now create a second, leaf coordinator. leafInputState := txn.GetLeafTxnInputState(ctx) - txn2 := client.NewLeafTxn(ctx, s.DB, 0 /* gatewayNodeID */, &leafInputState) + txn2 := kv.NewLeafTxn(ctx, s.DB, 0 /* gatewayNodeID */, &leafInputState) // Start the second transaction. key2 := roachpb.Key("b") @@ -929,7 +929,7 @@ func TestTxnCoordSenderNoDuplicateLockSpans(t *testing.T) { var expectedLockSpans []roachpb.Span - var senderFn client.SenderFunc = func(_ context.Context, ba roachpb.BatchRequest) ( + var senderFn kv.SenderFunc = func(_ context.Context, ba roachpb.BatchRequest) ( *roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() br.Txn = ba.Txn.Clone() @@ -955,8 +955,8 @@ func TestTxnCoordSenderNoDuplicateLockSpans(t *testing.T) { ) defer stopper.Stop(ctx) - db := client.NewDB(ambient, factory, clock) - txn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + db := kv.NewDB(ambient, factory, clock) + txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) // Acquire locks on a-b, c, u-w before the final batch. _, pErr := txn.ReverseScanForUpdate(ctx, roachpb.Key("a"), roachpb.Key("b"), 0) @@ -1046,7 +1046,7 @@ func checkTxnMetricsOnce( // have a faster sample interval and returns a cleanup function to be // executed by callers.
func setupMetricsTest(t *testing.T) (*localtestcluster.LocalTestCluster, TxnMetrics, func()) { - dbCtx := client.DefaultDBContext() + dbCtx := kv.DefaultDBContext() s := &localtestcluster.LocalTestCluster{ DBContext: &dbCtx, // Liveness heartbeat txns mess up the metrics. @@ -1072,7 +1072,7 @@ func TestTxnCommit(t *testing.T) { value := []byte("value") // Test a write txn commit. - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { key := []byte("key-commit") return txn.Put(ctx, key, value) }); err != nil { @@ -1081,7 +1081,7 @@ func TestTxnCommit(t *testing.T) { checkTxnMetrics(t, metrics, "commit txn", 1 /* commits */, 0 /* commits1PC */, 0, 0) // Test a read-only txn. - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { key := []byte("key-commit") _, err := txn.Get(ctx, key) return err @@ -1101,7 +1101,7 @@ func TestTxnOnePhaseCommit(t *testing.T) { value := []byte("value") ctx := context.TODO() - if err := s.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { key := []byte("key-commit") b := txn.NewBatch() b.Put(key, value) @@ -1135,7 +1135,7 @@ func TestTxnAbortCount(t *testing.T) { intentionalErrText := "intentional error to cause abort" // Test aborted transaction. - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { key := []byte("key-abort") if err := txn.Put(ctx, key, value); err != nil { @@ -1163,7 +1163,7 @@ func TestTxnRestartCount(t *testing.T) { // Start a transaction and read a key that we're going to modify outside the // txn. This ensures that refreshing the txn will not succeed, so a restart // will be necessary. 
- txn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) if _, err := txn.Get(ctx, readKey); err != nil { t.Fatal(err) } @@ -1216,7 +1216,7 @@ func TestTxnDurations(t *testing.T) { const incr int64 = 1000 for i := 0; i < puts; i++ { key := roachpb.Key(fmt.Sprintf("key-txn-durations-%d", i)) - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, key, []byte("val")); err != nil { return err } @@ -1283,7 +1283,7 @@ func TestAbortTransactionOnCommitErrors(t *testing.T) { stopper := stop.NewStopper() defer stopper.Stop(ctx) - var senderFn client.SenderFunc = func( + var senderFn kv.SenderFunc = func( _ context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() @@ -1323,8 +1323,8 @@ func TestAbortTransactionOnCommitErrors(t *testing.T) { senderFn, ) - db := client.NewDB(ambient, factory, clock) - txn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + db := kv.NewDB(ambient, factory, clock) + txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) if pErr := txn.Put(ctx, "a", "b"); pErr != nil { t.Fatalf("put failed: %s", pErr) } @@ -1356,7 +1356,7 @@ type mockSender struct { matchers []matcher } -var _ client.Sender = &mockSender{} +var _ kv.Sender = &mockSender{} type matcher func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) @@ -1402,7 +1402,7 @@ func TestRollbackErrorStopsHeartbeat(t *testing.T) { }, sender, ) - db := client.NewDB(ambient, factory, clock) + db := kv.NewDB(ambient, factory, clock) sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.EndTxn); !ok { @@ -1413,11 +1413,11 @@ func TestRollbackErrorStopsHeartbeat(t *testing.T) { return nil, roachpb.NewErrorf("injected err") }) - txn := client.NewTxn(ctx, db, roachpb.NodeID(1)) + txn := kv.NewTxn(ctx, db, roachpb.NodeID(1)) txnHeader := roachpb.Header{ Txn: txn.TestingCloneTxn(), } - if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( ctx, txn, txnHeader, &roachpb.PutRequest{ RequestHeader: roachpb.RequestHeader{ Key: roachpb.Key("a"), @@ -1430,7 +1430,7 @@ func TestRollbackErrorStopsHeartbeat(t *testing.T) { t.Fatalf("expected TxnCoordSender to be tracking after the write") } - if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( ctx, txn, txnHeader, &roachpb.EndTxnRequest{Commit: false}, ); !testutils.IsPError(pErr, "injected err") { @@ -1469,7 +1469,7 @@ func TestOnePCErrorTracking(t *testing.T) { }, sender, ) - db := client.NewDB(ambient, factory, clock) + db := kv.NewDB(ambient, factory, clock) keyA, keyB, keyC := roachpb.Key("a"), roachpb.Key("b"), roachpb.Key("c") // Register a matcher catching the commit attempt. @@ -1504,7 +1504,7 @@ func TestOnePCErrorTracking(t *testing.T) { return resp, nil }) - txn := client.NewTxn(ctx, db, roachpb.NodeID(1)) + txn := kv.NewTxn(ctx, db, roachpb.NodeID(1)) txnHeader := roachpb.Header{ Txn: txn.TestingCloneTxn(), } @@ -1517,7 +1517,7 @@ func TestOnePCErrorTracking(t *testing.T) { // Now send a rollback and verify that the TxnCoordSender attaches the locks // to it. 
- if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( ctx, txn, txnHeader, &roachpb.EndTxnRequest{Commit: false}, ); pErr != nil { @@ -1562,8 +1562,8 @@ func TestCommitReadOnlyTransaction(t *testing.T) { testutils.RunTrueAndFalse(t, "explicit txn", func(t *testing.T, explicitTxn bool) { testutils.RunTrueAndFalse(t, "with get", func(t *testing.T, withGet bool) { calls = nil - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() if withGet { b.Get("foo") @@ -1628,7 +1628,7 @@ func TestCommitMutatingTransaction(t *testing.T) { // Test all transactional write methods. testArgs := []struct { - f func(ctx context.Context, txn *client.Txn) error + f func(ctx context.Context, txn *kv.Txn) error expMethod roachpb.Method // pointWrite is set if the method is a "point write", which means that it // will be pipelined and we should expect a QueryIntent request at commit @@ -1636,17 +1636,17 @@ func TestCommitMutatingTransaction(t *testing.T) { pointWrite bool }{ { - f: func(ctx context.Context, txn *client.Txn) error { return txn.Put(ctx, "a", "b") }, + f: func(ctx context.Context, txn *kv.Txn) error { return txn.Put(ctx, "a", "b") }, expMethod: roachpb.Put, pointWrite: true, }, { - f: func(ctx context.Context, txn *client.Txn) error { return txn.CPut(ctx, "a", "b", nil) }, + f: func(ctx context.Context, txn *kv.Txn) error { return txn.CPut(ctx, "a", "b", nil) }, expMethod: roachpb.ConditionalPut, pointWrite: true, }, { - f: func(ctx context.Context, txn *client.Txn) error { + f: func(ctx context.Context, txn *kv.Txn) error { _, err := txn.Inc(ctx, "a", 1) return err }, @@ -1654,12 +1654,12 @@ func TestCommitMutatingTransaction(t *testing.T) { pointWrite: true, }, { - f: func(ctx context.Context, txn *client.Txn) error { return txn.Del(ctx, "a") }, + f: func(ctx context.Context, txn *kv.Txn) error { return txn.Del(ctx, "a") }, expMethod: roachpb.Delete, pointWrite: true, }, { - f: func(ctx context.Context, txn *client.Txn) error { return txn.DelRange(ctx, "a", "b") }, + f: func(ctx context.Context, txn *kv.Txn) error { return txn.DelRange(ctx, "a", "b") }, expMethod: roachpb.DeleteRange, pointWrite: false, }, @@ -1667,7 +1667,7 @@ func TestCommitMutatingTransaction(t *testing.T) { for i, test := range testArgs { t.Run(test.expMethod.String(), func(t *testing.T) { calls = nil - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) if err := db.Txn(ctx, test.f); err != nil { t.Fatalf("%d: unexpected error on commit: %s", i, err) } @@ -1709,8 +1709,8 @@ func TestAbortReadOnlyTransaction(t *testing.T) { }, sender, ) - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) + if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { return errors.New("foo") }); err == nil { t.Fatal("expected error on abort") @@ -1765,13 +1765,13 @@ func TestEndWriteRestartReadOnlyTransaction(t *testing.T) { }, sender, ) - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) testutils.RunTrueAndFalse(t, "write", func(t *testing.T, write bool) { 
testutils.RunTrueAndFalse(t, "success", func(t *testing.T, success bool) { calls = nil firstIter := true - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if firstIter { firstIter = false var err error @@ -1856,9 +1856,9 @@ func TestTransactionKeyNotChangedInRestart(t *testing.T) { }, sender, ) - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { defer func() { attempt++ }() b := txn.NewBatch() b.Put(keys[attempt], "b") @@ -1911,8 +1911,8 @@ func TestSequenceNumbers(t *testing.T) { }, sender, ) - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) - txn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) + txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) for i := 0; i < 5; i++ { var ba roachpb.BatchRequest @@ -1960,9 +1960,9 @@ func TestConcurrentTxnRequestsProhibited(t *testing.T) { }, sender, ) - db := client.NewDB(ambient, factory, clock) + db := kv.NewDB(ambient, factory, clock) - err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { g, gCtx := errgroup.WithContext(ctx) g.Go(func() error { return txn.Put(gCtx, "test_put", "val") @@ -2001,7 +2001,7 @@ func TestTxnRequestTxnTimestamp(t *testing.T) { }, sender, ) - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) curReq := 0 requests := []struct { @@ -2031,7 +2031,7 @@ func TestTxnRequestTxnTimestamp(t *testing.T) { manual.Set(requests[0].expRequestTS.WallTime) - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { for curReq = range requests { if _, err := txn.Get(ctx, "k"); err != nil { return err @@ -2076,14 +2076,14 @@ func TestReadOnlyTxnObeysDeadline(t *testing.T) { }, sender, ) - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) // We're going to run two tests: one where the EndTxn is by itself in a // batch, one where it is not. As of June 2018, the EndTxn is elided in // different ways in the two cases. t.Run("standalone commit", func(t *testing.T) { - txn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) // Set a deadline. We'll generate a retriable error with a higher timestamp. txn.UpdateDeadlineMaybe(ctx, clock.Now()) if _, err := txn.Get(ctx, "k"); err != nil { @@ -2097,7 +2097,7 @@ func TestReadOnlyTxnObeysDeadline(t *testing.T) { }) t.Run("commit in batch", func(t *testing.T) { - txn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) // Set a deadline. We'll generate a retriable error with a higher timestamp. 
txn.UpdateDeadlineMaybe(ctx, clock.Now()) b := txn.NewBatch() @@ -2123,7 +2123,7 @@ func TestTxnCoordSenderPipelining(t *testing.T) { distSender := s.DB.GetFactory().(*TxnCoordSenderFactory).NonTransactionalSender() var calls []roachpb.Method - var senderFn client.SenderFunc = func( + var senderFn kv.SenderFunc = func( ctx context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) @@ -2141,16 +2141,16 @@ func TestTxnCoordSenderPipelining(t *testing.T) { Clock: s.Clock, Stopper: s.Stopper, }, senderFn) - db := client.NewDB(ambientCtx, tsf, s.Clock) + db := kv.NewDB(ambientCtx, tsf, s.Clock) - err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return txn.Put(ctx, "key", "val") }) if err != nil { t.Fatal(err) } - err = db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.DisablePipelining(); err != nil { return err } @@ -2165,11 +2165,11 @@ func TestTxnCoordSenderPipelining(t *testing.T) { roachpb.Put, roachpb.EndTxn, }, calls) - for _, action := range []func(ctx context.Context, txn *client.Txn) error{ - func(ctx context.Context, txn *client.Txn) error { return txn.Put(ctx, "key", "val") }, - func(ctx context.Context, txn *client.Txn) error { _, err := txn.Get(ctx, "key"); return err }, + for _, action := range []func(ctx context.Context, txn *kv.Txn) error{ + func(ctx context.Context, txn *kv.Txn) error { return txn.Put(ctx, "key", "val") }, + func(ctx context.Context, txn *kv.Txn) error { _, err := txn.Get(ctx, "key"); return err }, } { - err = db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := action(ctx, txn); err != nil { t.Fatal(err) } @@ -2196,7 +2196,7 @@ func TestAnchorKey(t *testing.T) { key1 := roachpb.Key("a") key2 := roachpb.Key("b") - var senderFn client.SenderFunc = func( + var senderFn kv.SenderFunc = func( ctx context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if !roachpb.Key(ba.Txn.Key).Equal(key2) { @@ -2219,9 +2219,9 @@ func TestAnchorKey(t *testing.T) { }, senderFn, ) - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { ba := txn.NewBatch() ba.Get(key1) ba.Put(key2, "val") @@ -2256,15 +2256,15 @@ func TestLeafTxnClientRejectError(t *testing.T) { }, } - s := createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), knobs) + s := createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), knobs) defer s.Stop() ctx := context.Background() - rootTxn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + rootTxn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) leafInputState := rootTxn.GetLeafTxnInputState(ctx) // New create a second, leaf coordinator. - leafTxn := client.NewLeafTxn(ctx, s.DB, 0 /* gatewayNodeID */, &leafInputState) + leafTxn := kv.NewLeafTxn(ctx, s.DB, 0 /* gatewayNodeID */, &leafInputState) if _, err := leafTxn.Get(ctx, errKey); !testutils.IsError(err, "TransactionAbortedError") { t.Fatalf("expected injected err, got: %v", err) @@ -2286,13 +2286,13 @@ func TestLeafTxnClientRejectError(t *testing.T) { // inconsistent state. 
See comments in TxnCoordSender.UpdateRootWithLeafFinalState(). func TestUpdateRoootWithLeafFinalStateInAbortedTxn(t *testing.T) { defer leaktest.AfterTest(t)() - s := createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), nil /* knobs */) + s := createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), nil /* knobs */) defer s.Stop() ctx := context.Background() - txn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) leafInputState := txn.GetLeafTxnInputState(ctx) - leafTxn := client.NewLeafTxn(ctx, s.DB, 0, &leafInputState) + leafTxn := kv.NewLeafTxn(ctx, s.DB, 0, &leafInputState) finalState, err := leafTxn.GetLeafTxnFinalState(ctx) if err != nil { diff --git a/pkg/kv/kvclient/kvcoord/txn_correctness_test.go b/pkg/kv/kvclient/kvcoord/txn_correctness_test.go index 1853531f0c5c..510afe5f51e6 100644 --- a/pkg/kv/kvclient/kvcoord/txn_correctness_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_correctness_test.go @@ -23,7 +23,7 @@ import ( "sync" "testing" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" @@ -55,16 +55,16 @@ func (re *retryError) Error() string { // enforce an ordering. If a previous wait channel is set, the // command waits on it before execution. type cmd struct { - name string // name of the cmd for debug output - key, endKey string // key and optional endKey - debug string // optional debug string - txnIdx int // transaction index in the history - historyIdx int // this suffixes key so tests get unique keys - expRetry bool // true if we expect a retry - fn func(ctx context.Context, c *cmd, txn *client.Txn) error // execution function - ch chan error // channel for other commands to wait - prev *cmd // this command must wait on previous command before executing - env map[string]int64 // contains all previously read values + name string // name of the cmd for debug output + key, endKey string // key and optional endKey + debug string // optional debug string + txnIdx int // transaction index in the history + historyIdx int // this suffixes key so tests get unique keys + expRetry bool // true if we expect a retry + fn func(ctx context.Context, c *cmd, txn *kv.Txn) error // execution function + ch chan error // channel for other commands to wait + prev *cmd // this command must wait on previous command before executing + env map[string]int64 // contains all previously read values } func (c *cmd) init(prev *cmd) { @@ -84,7 +84,7 @@ func (c *cmd) clone() *cmd { return &clone } -func (c *cmd) execute(txn *client.Txn, t *testing.T) (string, error) { +func (c *cmd) execute(txn *kv.Txn, t *testing.T) (string, error) { if c.prev != nil { if log.V(2) { log.Infof(context.Background(), "%s waiting on %s", c, c.prev) @@ -143,7 +143,7 @@ func (c *cmd) String() string { } // readCmd reads a value from the db and stores it in the env. -func readCmd(ctx context.Context, c *cmd, txn *client.Txn) error { +func readCmd(ctx context.Context, c *cmd, txn *kv.Txn) error { r, err := txn.Get(ctx, c.getKey()) if err != nil { return err @@ -158,17 +158,17 @@ func readCmd(ctx context.Context, c *cmd, txn *client.Txn) error { } // deleteCmd deletes the value at the given key from the db. 
-func deleteCmd(ctx context.Context, c *cmd, txn *client.Txn) error { +func deleteCmd(ctx context.Context, c *cmd, txn *kv.Txn) error { return txn.Del(ctx, c.getKey()) } // deleteRngCmd deletes the range of values from the db from [key, endKey). -func deleteRngCmd(ctx context.Context, c *cmd, txn *client.Txn) error { +func deleteRngCmd(ctx context.Context, c *cmd, txn *kv.Txn) error { return txn.DelRange(ctx, c.getKey(), c.getEndKey()) } // scanCmd reads the values from the db from [key, endKey). -func scanCmd(ctx context.Context, c *cmd, txn *client.Txn) error { +func scanCmd(ctx context.Context, c *cmd, txn *kv.Txn) error { rows, err := txn.Scan(ctx, c.getKey(), c.getEndKey(), 0) if err != nil { return err @@ -187,7 +187,7 @@ func scanCmd(ctx context.Context, c *cmd, txn *client.Txn) error { // incCmd adds one to the value of c.key in the env (as determined by // a previous read or write, or else assumed to be zero) and writes it // to the db. -func incCmd(ctx context.Context, c *cmd, txn *client.Txn) error { +func incCmd(ctx context.Context, c *cmd, txn *kv.Txn) error { val, ok := c.env[c.key] if !ok { panic(fmt.Sprintf("can't increment key %q; not yet read", c.key)) @@ -205,7 +205,7 @@ func incCmd(ctx context.Context, c *cmd, txn *client.Txn) error { // and writes the value to the db. "c.endKey" here needs to be parsed // in the context of this command, which is a "+"-separated list of // keys from the env or numeric constants to sum. -func writeCmd(ctx context.Context, c *cmd, txn *client.Txn) error { +func writeCmd(ctx context.Context, c *cmd, txn *kv.Txn) error { sum := int64(0) for _, sp := range strings.Split(c.endKey, "+") { if constant, err := strconv.Atoi(sp); err != nil { @@ -220,12 +220,12 @@ func writeCmd(ctx context.Context, c *cmd, txn *client.Txn) error { } // commitCmd commits the transaction. -func commitCmd(ctx context.Context, c *cmd, txn *client.Txn) error { +func commitCmd(ctx context.Context, c *cmd, txn *kv.Txn) error { return txn.Commit(ctx) } type cmdSpec struct { - fn func(ctx context.Context, c *cmd, txn *client.Txn) error + fn func(ctx context.Context, c *cmd, txn *kv.Txn) error re *regexp.Regexp } @@ -580,7 +580,7 @@ func newHistoryVerifier( } } -func (hv *historyVerifier) run(db *client.DB, t *testing.T) { +func (hv *historyVerifier) run(db *kv.DB, t *testing.T) { log.Infof(context.Background(), "verifying all possible histories for the %q anomaly", hv.name) enumPri := enumeratePriorities(len(hv.txns), []enginepb.TxnPriority{1, enginepb.MaxTxnPriority}) enumHis := enumerateHistories(hv.txns, hv.equal) @@ -604,7 +604,7 @@ func (hv *historyVerifier) run(db *client.DB, t *testing.T) { // // This process continues recursively if there are further retries. 
func (hv *historyVerifier) runHistoryWithRetry( - priorities []enginepb.TxnPriority, cmds []*cmd, db *client.DB, t *testing.T, + priorities []enginepb.TxnPriority, cmds []*cmd, db *kv.DB, t *testing.T, ) error { if err := hv.runHistory(priorities, cmds, db, t); err != nil { if log.V(1) { @@ -638,7 +638,7 @@ func (hv *historyVerifier) runHistoryWithRetry( } func (hv *historyVerifier) runHistory( - priorities []enginepb.TxnPriority, cmds []*cmd, db *client.DB, t *testing.T, + priorities []enginepb.TxnPriority, cmds []*cmd, db *kv.DB, t *testing.T, ) error { hv.idx++ if t.Failed() { @@ -727,11 +727,11 @@ func (hv *historyVerifier) runHistory( } func (hv *historyVerifier) runCmds( - txnName string, cmds []*cmd, db *client.DB, t *testing.T, + txnName string, cmds []*cmd, db *kv.DB, t *testing.T, ) (string, map[string]int64, error) { var strs []string env := map[string]int64{} - err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { txn.SetDebugName(txnName) for _, c := range cmds { c.historyIdx = hv.idx @@ -749,7 +749,7 @@ func (hv *historyVerifier) runCmds( } func (hv *historyVerifier) runTxn( - txnIdx int, priority enginepb.TxnPriority, cmds []*cmd, db *client.DB, t *testing.T, + txnIdx int, priority enginepb.TxnPriority, cmds []*cmd, db *kv.DB, t *testing.T, ) error { var retry int txnName := fmt.Sprintf("txn %d", txnIdx+1) @@ -763,7 +763,7 @@ func (hv *historyVerifier) runTxn( prev.ch <- err } - err := db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { // If this is 2nd attempt, and a retry wasn't expected, return a // retry error which results in further histories being enumerated. if retry++; retry > 1 { @@ -802,7 +802,7 @@ func (hv *historyVerifier) runTxn( } func (hv *historyVerifier) runCmd( - txn *client.Txn, txnIdx, retry int, c *cmd, t *testing.T, + txn *kv.Txn, txnIdx, retry int, c *cmd, t *testing.T, ) (string, error) { fmtStr, err := c.execute(txn, t) cmdStr := fmt.Sprintf(fmtStr, txnIdx+1, retry) diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go index 59381b3eb7d4..19801c3046af 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go @@ -13,7 +13,7 @@ package kvcoord import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/errors" @@ -152,17 +152,17 @@ func (s *txnSeqNumAllocator) stepLocked(ctx context.Context) error { // behavior needed to provide the documented API semantics of // sender.ConfigureStepping() (see client/sender.go). 
func (s *txnSeqNumAllocator) configureSteppingLocked( - newMode client.SteppingMode, -) (prevMode client.SteppingMode) { + newMode kv.SteppingMode, +) (prevMode kv.SteppingMode) { prevEnabled := s.steppingModeEnabled - enabled := newMode == client.SteppingEnabled + enabled := newMode == kv.SteppingEnabled s.steppingModeEnabled = enabled if !prevEnabled && enabled { s.readSeq = s.writeSeq } - prevMode = client.SteppingDisabled + prevMode = kv.SteppingDisabled if prevEnabled { - prevMode = client.SteppingEnabled + prevMode = kv.SteppingEnabled } return prevMode } diff --git a/pkg/kv/kvclient/kvcoord/txn_lock_gatekeeper.go b/pkg/kv/kvclient/kvcoord/txn_lock_gatekeeper.go index 59f1af80bea3..7e7ff6ed52c2 100644 --- a/pkg/kv/kvclient/kvcoord/txn_lock_gatekeeper.go +++ b/pkg/kv/kvclient/kvcoord/txn_lock_gatekeeper.go @@ -14,7 +14,7 @@ import ( "context" "sync" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/errors" ) @@ -39,7 +39,7 @@ type lockedSender interface { // receiving a response. It allows the entire txnInterceptor stack to operate // under lock without needing to worry about unlocking at the correct time. type txnLockGatekeeper struct { - wrapped client.Sender + wrapped kv.Sender mu sync.Locker // shared with TxnCoordSender // If set, concurrent requests are allowed. If not set, concurrent requests diff --git a/pkg/kv/kvclient/kvcoord/txn_test.go b/pkg/kv/kvclient/kvcoord/txn_test.go index e4fea895e8ab..575ccc20f0e1 100644 --- a/pkg/kv/kvclient/kvcoord/txn_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_test.go @@ -17,8 +17,8 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/tscache" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -45,14 +45,14 @@ func TestTxnDBBasics(t *testing.T) { for _, commit := range []bool{true, false} { key := []byte(fmt.Sprintf("key-%t", commit)) - err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { // Put transactional value. if err := txn.Put(ctx, key, value); err != nil { return err } // Attempt to read in another txn. 
- conflictTxn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + conflictTxn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) conflictTxn.TestingSetPriority(enginepb.MaxTxnPriority) if gr, err := conflictTxn.Get(ctx, key); err != nil { return err @@ -107,7 +107,7 @@ func BenchmarkSingleRoundtripWithLatency(b *testing.B) { key := roachpb.Key("key") b.ResetTimer() for i := 0; i < b.N; i++ { - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Put(key, fmt.Sprintf("value-%d", i)) return txn.CommitInBatch(ctx, b) @@ -136,13 +136,13 @@ func TestLostUpdate(t *testing.T) { start := make(chan struct{}) go func() { <-start - done <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + done <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { return txn.Put(ctx, key, "hi") }) }() firstAttempt := true - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { // Issue a read to get initial value. gr, err := txn.Get(ctx, key) if err != nil { @@ -200,7 +200,7 @@ func TestPriorityRatchetOnAbortOrPush(t *testing.T) { defer s.Stop() pushByReading := func(key roachpb.Key) { - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetUserPriority(roachpb.MaxUserPriority); err != nil { t.Fatal(err) } @@ -211,7 +211,7 @@ func TestPriorityRatchetOnAbortOrPush(t *testing.T) { } } abortByWriting := func(key roachpb.Key) { - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetUserPriority(roachpb.MaxUserPriority); err != nil { t.Fatal(err) } @@ -224,7 +224,7 @@ func TestPriorityRatchetOnAbortOrPush(t *testing.T) { // Try both read and write. for _, read := range []bool{true, false} { var iteration int - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { defer func() { iteration++ }() key := roachpb.Key(fmt.Sprintf("read=%t", read)) @@ -278,14 +278,14 @@ func TestTxnTimestampRegression(t *testing.T) { keyA := "a" keyB := "b" - err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { // Put transactional value. if err := txn.Put(ctx, keyA, "value1"); err != nil { return err } // Attempt to read in another txn (this will push timestamp of transaction). - conflictTxn := client.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) + conflictTxn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) conflictTxn.TestingSetPriority(enginepb.MaxTxnPriority) if _, err := conflictTxn.Get(context.TODO(), keyA); err != nil { return err @@ -318,7 +318,7 @@ func TestTxnLongDelayBetweenWritesWithConcurrentRead(t *testing.T) { ch := make(chan struct{}) errChan := make(chan error) go func() { - errChan <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + errChan <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { // Put transactional value. 
if err := txn.Put(ctx, keyA, "value1"); err != nil { return err @@ -336,7 +336,7 @@ func TestTxnLongDelayBetweenWritesWithConcurrentRead(t *testing.T) { <-ch // Delay for longer than the cache window. s.Manual.Increment((tscache.MinRetentionWindow + time.Second).Nanoseconds()) - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { // Attempt to get first keyB. gr1, err := txn.Get(ctx, keyB) if err != nil { @@ -370,7 +370,7 @@ func TestTxnLongDelayBetweenWritesWithConcurrentRead(t *testing.T) { // See issue #676 for full details about original bug. func TestTxnRepeatGetWithRangeSplit(t *testing.T) { defer leaktest.AfterTest(t)() - s := createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), &kvserver.StoreTestingKnobs{ + s := createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), &kvserver.StoreTestingKnobs{ DisableScanner: true, DisableSplitQueue: true, DisableMergeQueue: true, @@ -383,7 +383,7 @@ func TestTxnRepeatGetWithRangeSplit(t *testing.T) { ch := make(chan struct{}) errChan := make(chan error) go func() { - errChan <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + errChan <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { // Put transactional value. if err := txn.Put(ctx, keyA, "value1"); err != nil { return err @@ -400,7 +400,7 @@ func TestTxnRepeatGetWithRangeSplit(t *testing.T) { // Wait till txnA finish put(a). <-ch - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { // First get keyC, value will be nil. gr1, err := txn.Get(ctx, keyC) if err != nil { @@ -459,7 +459,7 @@ func TestTxnRestartedSerializableTimestampRegression(t *testing.T) { errChan := make(chan error) var count int go func() { - errChan <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + errChan <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { count++ // Use a low priority for the transaction so that it can be pushed. if err := txn.SetUserPriority(roachpb.MinUserPriority); err != nil { @@ -524,7 +524,7 @@ func TestTxnResolveIntentsFromMultipleEpochs(t *testing.T) { // Launch goroutine to write the three keys on three successive epochs. go func() { var count int - err := s.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := s.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Read the write skew key, which will be written by another goroutine // to ensure transaction restarts. 
if _, err := txn.Get(ctx, writeSkewKey); err != nil { @@ -609,7 +609,7 @@ func TestTxnCommitTimestampAdvancedByRefresh(t *testing.T) { injected := false var refreshTS hlc.Timestamp errKey := roachpb.Key("inject_err") - s := createTestDBWithContextAndKnobs(t, client.DefaultDBContext(), &kvserver.StoreTestingKnobs{ + s := createTestDBWithContextAndKnobs(t, kv.DefaultDBContext(), &kvserver.StoreTestingKnobs{ TestingRequestFilter: func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { if g, ok := ba.GetArg(roachpb.Get); ok && g.(*roachpb.GetRequest).Key.Equal(errKey) { if injected { @@ -629,7 +629,7 @@ func TestTxnCommitTimestampAdvancedByRefresh(t *testing.T) { }) defer s.Stop() - err := s.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := s.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { _, err := txn.Get(ctx, errKey) if err != nil { return err @@ -674,7 +674,7 @@ func TestTxnLeavesIntentBehindAfterWriteTooOldError(t *testing.T) { require.Error(t, err, "WriteTooOld") // Check that the intent was left behind. - b := client.Batch{} + b := kv.Batch{} b.Header.ReadConsistency = roachpb.READ_UNCOMMITTED b.Get(key) require.NoError(t, s.DB.Run(ctx, &b)) diff --git a/pkg/kv/kvnemesis/applier.go b/pkg/kv/kvnemesis/applier.go index 9e5eda63dd21..c2424ca99707 100644 --- a/pkg/kv/kvnemesis/applier.go +++ b/pkg/kv/kvnemesis/applier.go @@ -13,7 +13,7 @@ package kvnemesis import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -24,20 +24,20 @@ import ( // Applier executes Steps. type Applier struct { - dbs []*client.DB + dbs []*kv.DB mu struct { dbIdx int syncutil.Mutex - txns map[string]*client.Txn + txns map[string]*kv.Txn } } // MakeApplier constructs an Applier that executes against the given DB. -func MakeApplier(dbs ...*client.DB) *Applier { +func MakeApplier(dbs ...*kv.DB) *Applier { a := &Applier{ dbs: dbs, } - a.mu.txns = make(map[string]*client.Txn) + a.mu.txns = make(map[string]*kv.Txn) return a } @@ -45,7 +45,7 @@ func MakeApplier(dbs ...*client.DB) *Applier { // error is only returned from Apply if there is an internal coding error within // Applier, errors from a Step execution are saved in the Step itself. 
func (a *Applier) Apply(ctx context.Context, step *Step) (retErr error) { - var db *client.DB + var db *kv.DB db, step.DBID = a.getNextDBRoundRobin() step.Before = db.Clock().Now() @@ -59,7 +59,7 @@ func (a *Applier) Apply(ctx context.Context, step *Step) (retErr error) { return nil } -func (a *Applier) getNextDBRoundRobin() (*client.DB, int32) { +func (a *Applier) getNextDBRoundRobin() (*kv.DB, int32) { a.mu.Lock() dbIdx := a.mu.dbIdx a.mu.dbIdx = (a.mu.dbIdx + 1) % len(a.dbs) @@ -67,7 +67,7 @@ func (a *Applier) getNextDBRoundRobin() (*client.DB, int32) { return a.dbs[dbIdx], int32(dbIdx) } -func applyOp(ctx context.Context, db *client.DB, op *Operation) { +func applyOp(ctx context.Context, db *kv.DB, op *Operation) { switch o := op.GetValue().(type) { case *GetOperation, *PutOperation, *BatchOperation: applyClientOp(ctx, db, op) @@ -78,13 +78,13 @@ func applyOp(ctx context.Context, db *client.DB, op *Operation) { err := db.AdminMerge(ctx, o.Key) o.Result = resultError(ctx, err) case *ChangeReplicasOperation: - ctx = client.ChangeReplicasCanMixAddAndRemoveContext(ctx) + ctx = kv.ChangeReplicasCanMixAddAndRemoveContext(ctx) desc := getRangeDesc(ctx, o.Key, db) _, err := db.AdminChangeReplicas(ctx, o.Key, desc, o.Changes) // TODO(dan): Save returned desc? o.Result = resultError(ctx, err) case *ClosureTxnOperation: - txnErr := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + txnErr := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { for i := range o.Ops { op := &o.Ops[i] applyClientOp(ctx, txn, op) @@ -117,9 +117,9 @@ func applyOp(ctx context.Context, db *client.DB, op *Operation) { } type clientI interface { - Get(context.Context, interface{}) (client.KeyValue, error) + Get(context.Context, interface{}) (kv.KeyValue, error) Put(context.Context, interface{}, interface{}) error - Run(context.Context, *client.Batch) error + Run(context.Context, *kv.Batch) error } func applyClientOp(ctx context.Context, db clientI, op *Operation) { @@ -142,7 +142,7 @@ func applyClientOp(ctx context.Context, db clientI, op *Operation) { err := db.Put(ctx, o.Key, o.Value) o.Result = resultError(ctx, err) case *BatchOperation: - b := &client.Batch{} + b := &kv.Batch{} applyBatchOp(ctx, b, db.Run, o) default: panic(errors.AssertionFailedf(`unknown batch operation type: %T %v`, o, o)) @@ -150,10 +150,7 @@ func applyClientOp(ctx context.Context, db clientI, op *Operation) { } func applyBatchOp( - ctx context.Context, - b *client.Batch, - runFn func(context.Context, *client.Batch) error, - o *BatchOperation, + ctx context.Context, b *kv.Batch, runFn func(context.Context, *kv.Batch) error, o *BatchOperation, ) { for i := range o.Ops { switch subO := o.Ops[i].GetValue().(type) { @@ -203,12 +200,12 @@ func resultError(ctx context.Context, err error) Result { } } -func getRangeDesc(ctx context.Context, key roachpb.Key, dbs ...*client.DB) roachpb.RangeDescriptor { +func getRangeDesc(ctx context.Context, key roachpb.Key, dbs ...*kv.DB) roachpb.RangeDescriptor { var dbIdx int var opts = retry.Options{} for r := retry.StartWithCtx(ctx, opts); r.Next(); dbIdx = (dbIdx + 1) % len(dbs) { sender := dbs[dbIdx].NonTransactionalSender() - descs, _, err := client.RangeLookup(ctx, sender, key, roachpb.CONSISTENT, 0, false) + descs, _, err := kv.RangeLookup(ctx, sender, key, roachpb.CONSISTENT, 0, false) if err != nil { log.Infof(ctx, "looking up descriptor for %s: %+v", key, err) continue @@ -222,7 +219,7 @@ func getRangeDesc(ctx context.Context, key roachpb.Key, dbs ...*client.DB) roach panic(`unreachable`) 
} -func newGetReplicasFn(dbs ...*client.DB) GetReplicasFn { +func newGetReplicasFn(dbs ...*kv.DB) GetReplicasFn { ctx := context.Background() return func(key roachpb.Key) []roachpb.ReplicationTarget { desc := getRangeDesc(ctx, key, dbs...) diff --git a/pkg/kv/kvnemesis/kvnemesis.go b/pkg/kv/kvnemesis/kvnemesis.go index 19ac13708505..ce18c782e948 100644 --- a/pkg/kv/kvnemesis/kvnemesis.go +++ b/pkg/kv/kvnemesis/kvnemesis.go @@ -17,7 +17,7 @@ import ( "strings" "sync/atomic" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/util/ctxgroup" "github.com/cockroachdb/cockroach/pkg/util/log" ) @@ -29,7 +29,7 @@ func RunNemesis( rng *rand.Rand, ct ClosedTimestampTargetInterval, config GeneratorConfig, - dbs ...*client.DB, + dbs ...*kv.DB, ) ([]error, error) { const concurrency, numSteps = 5, 30 diff --git a/pkg/kv/kvnemesis/kvnemesis_test.go b/pkg/kv/kvnemesis/kvnemesis_test.go index 160cd74ae384..17a923d6ea5c 100644 --- a/pkg/kv/kvnemesis/kvnemesis_test.go +++ b/pkg/kv/kvnemesis/kvnemesis_test.go @@ -18,7 +18,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -61,7 +61,7 @@ func TestKVNemesisMultiNode(t *testing.T) { ctx := context.Background() tc := testcluster.StartTestCluster(t, numNodes, base.TestClusterArgs{}) defer tc.Stopper().Stop(ctx) - dbs, sqlDBs := make([]*client.DB, numNodes), make([]*gosql.DB, numNodes) + dbs, sqlDBs := make([]*kv.DB, numNodes), make([]*gosql.DB, numNodes) for i := 0; i < numNodes; i++ { dbs[i] = tc.Server(i).DB() sqlDBs[i] = tc.ServerConn(i) diff --git a/pkg/kv/kvnemesis/watcher.go b/pkg/kv/kvnemesis/watcher.go index 09438e75ea57..42c7610533aa 100644 --- a/pkg/kv/kvnemesis/watcher.go +++ b/pkg/kv/kvnemesis/watcher.go @@ -14,7 +14,7 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" @@ -49,7 +49,7 @@ type Watcher struct { // Watch starts a new Watcher over the given span of kvs. See Watcher. 
func Watch( - ctx context.Context, dbs []*client.DB, ct ClosedTimestampTargetInterval, dataSpan roachpb.Span, + ctx context.Context, dbs []*kv.DB, ct ClosedTimestampTargetInterval, dataSpan roachpb.Span, ) (*Watcher, error) { if len(dbs) < 1 { return nil, errors.New(`at least one db must be given`) @@ -68,7 +68,7 @@ func Watch( dss := make([]*kvcoord.DistSender, len(dbs)) for i := range dbs { sender := dbs[i].NonTransactionalSender() - dss[i] = sender.(*client.CrossRangeTxnWrapperSender).Wrapped().(*kvcoord.DistSender) + dss[i] = sender.(*kv.CrossRangeTxnWrapperSender).Wrapped().(*kvcoord.DistSender) } startTs := firstDB.Clock().Now() diff --git a/pkg/kv/kvserver/addressing.go b/pkg/kv/kvserver/addressing.go index b0041874b00b..dfb0e3492b45 100644 --- a/pkg/kv/kvserver/addressing.go +++ b/pkg/kv/kvserver/addressing.go @@ -13,26 +13,26 @@ package kvserver import ( "bytes" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/pkg/errors" ) -type metaAction func(*client.Batch, roachpb.Key, *roachpb.RangeDescriptor) +type metaAction func(*kv.Batch, roachpb.Key, *roachpb.RangeDescriptor) -func putMeta(b *client.Batch, key roachpb.Key, desc *roachpb.RangeDescriptor) { +func putMeta(b *kv.Batch, key roachpb.Key, desc *roachpb.RangeDescriptor) { b.Put(key, desc) } -func delMeta(b *client.Batch, key roachpb.Key, desc *roachpb.RangeDescriptor) { +func delMeta(b *kv.Batch, key roachpb.Key, desc *roachpb.RangeDescriptor) { b.Del(key) } // splitRangeAddressing creates (or overwrites if necessary) the meta1 // and meta2 range addressing records for the left and right ranges // caused by a split. -func splitRangeAddressing(b *client.Batch, left, right *roachpb.RangeDescriptor) error { +func splitRangeAddressing(b *kv.Batch, left, right *roachpb.RangeDescriptor) error { if err := rangeAddressing(b, left, putMeta); err != nil { return err } @@ -43,7 +43,7 @@ func splitRangeAddressing(b *client.Batch, left, right *roachpb.RangeDescriptor) // addressing records caused by merging and updates the records for // the new merged range. Left is the range descriptor for the "left" // range before merging and merged describes the left to right merge. -func mergeRangeAddressing(b *client.Batch, left, merged *roachpb.RangeDescriptor) error { +func mergeRangeAddressing(b *kv.Batch, left, merged *roachpb.RangeDescriptor) error { if err := rangeAddressing(b, left, delMeta); err != nil { return err } @@ -52,7 +52,7 @@ func mergeRangeAddressing(b *client.Batch, left, merged *roachpb.RangeDescriptor // updateRangeAddressing overwrites the meta1 and meta2 range addressing // records for the descriptor. -func updateRangeAddressing(b *client.Batch, desc *roachpb.RangeDescriptor) error { +func updateRangeAddressing(b *kv.Batch, desc *roachpb.RangeDescriptor) error { return rangeAddressing(b, desc, putMeta) } @@ -70,7 +70,7 @@ func updateRangeAddressing(b *client.Batch, desc *roachpb.RangeDescriptor) error // - meta2(desc.EndKey) // 3a. If desc.StartKey is not normal user key: // - meta1(KeyMax) -func rangeAddressing(b *client.Batch, desc *roachpb.RangeDescriptor, action metaAction) error { +func rangeAddressing(b *kv.Batch, desc *roachpb.RangeDescriptor, action metaAction) error { // 1. handle illegal case of start or end key being meta1. 
if bytes.HasPrefix(desc.EndKey, keys.Meta1Prefix) || bytes.HasPrefix(desc.StartKey, keys.Meta1Prefix) { diff --git a/pkg/kv/kvserver/addressing_test.go b/pkg/kv/kvserver/addressing_test.go index ad97169f5cb9..25c5c694ce2c 100644 --- a/pkg/kv/kvserver/addressing_test.go +++ b/pkg/kv/kvserver/addressing_test.go @@ -19,8 +19,8 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" @@ -114,7 +114,7 @@ func TestUpdateRangeAddressing(t *testing.T) { for i, test := range testCases { left := &roachpb.RangeDescriptor{RangeID: roachpb.RangeID(i * 2), StartKey: test.leftStart, EndKey: test.leftEnd} right := &roachpb.RangeDescriptor{RangeID: roachpb.RangeID(i*2 + 1), StartKey: test.rightStart, EndKey: test.rightEnd} - b := &client.Batch{} + b := &kv.Batch{} if test.split { if err := splitRangeAddressing(b, left, right); err != nil { t.Fatal(err) @@ -143,9 +143,9 @@ func TestUpdateRangeAddressing(t *testing.T) { }, store.TestSender(), ) - db := client.NewDB(actx, tcsf, store.cfg.Clock) + db := kv.NewDB(actx, tcsf, store.cfg.Clock) ctx := context.Background() - txn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) if err := txn.Run(ctx, b); err != nil { t.Fatal(err) } @@ -249,7 +249,7 @@ func TestUpdateRangeAddressingSplitMeta1(t *testing.T) { defer leaktest.AfterTest(t)() left := &roachpb.RangeDescriptor{StartKey: roachpb.RKeyMin, EndKey: meta1Key(roachpb.RKey("a"))} right := &roachpb.RangeDescriptor{StartKey: meta1Key(roachpb.RKey("a")), EndKey: roachpb.RKeyMax} - if err := splitRangeAddressing(&client.Batch{}, left, right); err == nil { + if err := splitRangeAddressing(&kv.Batch{}, left, right); err == nil { t.Error("expected failure trying to update addressing records for meta1 split") } } diff --git a/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go b/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go index e03b29a40e1d..e6eb5e1ba5e5 100644 --- a/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go +++ b/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go @@ -20,8 +20,8 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -99,7 +99,7 @@ func TestDBAddSSTable(t *testing.T) { } // if store != nil, assume it is on-disk and check ingestion semantics. 
-func runTestDBAddSSTable(ctx context.Context, t *testing.T, db *client.DB, store *kvserver.Store) { +func runTestDBAddSSTable(ctx context.Context, t *testing.T, db *kv.DB, store *kvserver.Store) { { key := storage.MVCCKey{Key: []byte("bb"), Timestamp: hlc.Timestamp{WallTime: 2}} data, err := singleKVSSTable(key, roachpb.MakeValueFromString("1").RawBytes) diff --git a/pkg/kv/kvserver/batcheval/eval_context.go b/pkg/kv/kvserver/batcheval/eval_context.go index 781e7fdc840c..f603b10b0a20 100644 --- a/pkg/kv/kvserver/batcheval/eval_context.go +++ b/pkg/kv/kvserver/batcheval/eval_context.go @@ -14,7 +14,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/abortspan" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" @@ -51,7 +51,7 @@ type EvalContext interface { Engine() storage.Engine Clock() *hlc.Clock - DB() *client.DB + DB() *kv.DB AbortSpan() *abortspan.AbortSpan GetConcurrencyManager() concurrency.Manager GetLimiters() *Limiters @@ -140,7 +140,7 @@ func (m *mockEvalCtxImpl) Engine() storage.Engine { func (m *mockEvalCtxImpl) Clock() *hlc.Clock { return m.MockEvalCtx.Clock } -func (m *mockEvalCtxImpl) DB() *client.DB { +func (m *mockEvalCtxImpl) DB() *kv.DB { panic("unimplemented") } func (m *mockEvalCtxImpl) GetLimiters() *Limiters { diff --git a/pkg/kv/kvserver/client_atomic_membership_change_test.go b/pkg/kv/kvserver/client_atomic_membership_change_test.go index 0961b1afc602..9ee14358cb18 100644 --- a/pkg/kv/kvserver/client_atomic_membership_change_test.go +++ b/pkg/kv/kvserver/client_atomic_membership_change_test.go @@ -16,7 +16,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -61,7 +61,7 @@ func TestAtomicReplicationChange(t *testing.T) { runChange := func(expDesc roachpb.RangeDescriptor, chgs []roachpb.ReplicationChange) roachpb.RangeDescriptor { t.Helper() desc, err := tc.Servers[0].DB().AdminChangeReplicas( - client.ChangeReplicasCanMixAddAndRemoveContext(ctx), + kv.ChangeReplicasCanMixAddAndRemoveContext(ctx), k, expDesc, chgs, ) require.NoError(t, err) diff --git a/pkg/kv/kvserver/client_lease_test.go b/pkg/kv/kvserver/client_lease_test.go index 9845831e6c62..2babcbd8aba8 100644 --- a/pkg/kv/kvserver/client_lease_test.go +++ b/pkg/kv/kvserver/client_lease_test.go @@ -23,8 +23,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagepb" @@ -59,7 +59,7 @@ func TestStoreRangeLease(t *testing.T) { } for _, splitKey := range splitKeys { splitArgs := adminSplitArgs(splitKey) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { t.Fatal(pErr) } } @@ -101,7 +101,7 @@ func 
TestStoreRangeLeaseSwitcheroo(t *testing.T) { splitKey := roachpb.Key("a") splitArgs := adminSplitArgs(splitKey) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { t.Fatal(pErr) } @@ -167,7 +167,7 @@ func TestStoreGossipSystemData(t *testing.T) { splitKey := keys.SystemConfigSplitKey splitArgs := adminSplitArgs(splitKey) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { t.Fatal(pErr) } if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil { @@ -373,7 +373,7 @@ func TestCannotTransferLeaseToVoterOutgoing(t *testing.T) { go func() { defer wg.Done() _, err = tc.Server(0).DB().AdminChangeReplicas( - client.ChangeReplicasCanMixAddAndRemoveContext(ctx), + kv.ChangeReplicasCanMixAddAndRemoveContext(ctx), scratchStartKey, desc, []roachpb.ReplicationChange{ {ChangeType: roachpb.REMOVE_REPLICA, Target: tc.Target(2)}, {ChangeType: roachpb.ADD_REPLICA, Target: tc.Target(3)}, diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index fc8dc910cbff..663464b94ecb 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -28,8 +28,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer" @@ -74,7 +74,7 @@ func createSplitRanges( ctx context.Context, store *kvserver.Store, ) (*roachpb.RangeDescriptor, *roachpb.RangeDescriptor, error) { args := adminSplitArgs(roachpb.Key("b")) - if _, err := client.SendWrapped(ctx, store.TestSender(), args); err != nil { + if _, err := kv.SendWrapped(ctx, store.TestSender(), args); err != nil { return nil, nil, err.GoError() } @@ -108,7 +108,7 @@ func TestStoreRangeMergeTwoEmptyRanges(t *testing.T) { // Merge the RHS back into the LHS. args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store.TestSender(), args) if pErr != nil { t.Fatal(pErr) } @@ -158,7 +158,7 @@ func TestStoreRangeMergeMetadataCleanup(t *testing.T) { // Write some values left of the proposed split key. pArgs := putArgs(roachpb.Key("aaa"), content) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), pArgs); pErr != nil { t.Fatal(pErr) } @@ -173,7 +173,7 @@ func TestStoreRangeMergeMetadataCleanup(t *testing.T) { // Write some values right of the split key. pArgs = putArgs(roachpb.Key("ccc"), content) - if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ RangeID: rhsDesc.RangeID, }, pArgs); pErr != nil { t.Fatal(pErr) @@ -181,7 +181,7 @@ func TestStoreRangeMergeMetadataCleanup(t *testing.T) { // Merge the b range back into the a range. 
args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), args); pErr != nil { t.Fatal(pErr) } @@ -278,11 +278,11 @@ func mergeWithData(t *testing.T, retries int64) { // Write some values left and right of the proposed split key. pArgs := putArgs(roachpb.Key("aaa"), content) - if _, pErr := client.SendWrapped(ctx, store1.TestSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store1.TestSender(), pArgs); pErr != nil { t.Fatal(pErr) } pArgs = putArgs(roachpb.Key("ccc"), content) - if _, pErr := client.SendWrappedWith(ctx, store2.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(ctx, store2.TestSender(), roachpb.Header{ RangeID: rhsDesc.RangeID, }, pArgs); pErr != nil { t.Fatal(pErr) @@ -290,7 +290,7 @@ func mergeWithData(t *testing.T, retries int64) { // Confirm the values are there. gArgs := getArgs(roachpb.Key("aaa")) - if reply, pErr := client.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil { + if reply, pErr := kv.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil { t.Fatal(pErr) } else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil { t.Fatal(err) @@ -298,7 +298,7 @@ func mergeWithData(t *testing.T, retries int64) { t.Fatalf("actual value %q did not match expected value %q", replyBytes, content) } gArgs = getArgs(roachpb.Key("ccc")) - if reply, pErr := client.SendWrappedWith(ctx, store2.TestSender(), roachpb.Header{ + if reply, pErr := kv.SendWrappedWith(ctx, store2.TestSender(), roachpb.Header{ RangeID: rhsDesc.RangeID, }, gArgs); pErr != nil { t.Fatal(pErr) @@ -310,7 +310,7 @@ func mergeWithData(t *testing.T, retries int64) { // Merge the b range back into the a range. args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, store1.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store1.TestSender(), args); pErr != nil { t.Fatal(pErr) } @@ -339,7 +339,7 @@ func mergeWithData(t *testing.T, retries int64) { // Try to get values from after the merge. gArgs = getArgs(roachpb.Key("aaa")) - if reply, pErr := client.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil { + if reply, pErr := kv.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil { t.Fatal(pErr) } else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil { t.Fatal(err) @@ -347,7 +347,7 @@ func mergeWithData(t *testing.T, retries int64) { t.Fatalf("actual value %q did not match expected value %q", replyBytes, content) } gArgs = getArgs(roachpb.Key("ccc")) - if reply, pErr := client.SendWrappedWith(ctx, store1.TestSender(), roachpb.Header{ + if reply, pErr := kv.SendWrappedWith(ctx, store1.TestSender(), roachpb.Header{ RangeID: rhsRepl.RangeID, }, gArgs); pErr != nil { t.Fatal(pErr) @@ -359,11 +359,11 @@ func mergeWithData(t *testing.T, retries int64) { // Put new values after the merge on both sides. 
pArgs = putArgs(roachpb.Key("aaaa"), content) - if _, pErr := client.SendWrapped(ctx, store1.TestSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store1.TestSender(), pArgs); pErr != nil { t.Fatal(pErr) } pArgs = putArgs(roachpb.Key("cccc"), content) - if _, pErr := client.SendWrappedWith(ctx, store1.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(ctx, store1.TestSender(), roachpb.Header{ RangeID: rhsRepl.RangeID, }, pArgs); pErr != nil { t.Fatal(pErr) @@ -371,7 +371,7 @@ func mergeWithData(t *testing.T, retries int64) { // Try to get the newly placed values. gArgs = getArgs(roachpb.Key("aaaa")) - if reply, pErr := client.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil { + if reply, pErr := kv.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil { t.Fatal(pErr) } else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil { t.Fatal(err) @@ -379,7 +379,7 @@ func mergeWithData(t *testing.T, retries int64) { t.Fatalf("actual value %q did not match expected value %q", replyBytes, content) } gArgs = getArgs(roachpb.Key("cccc")) - if reply, pErr := client.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil { + if reply, pErr := kv.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil { t.Fatal(pErr) } else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil { t.Fatal(err) @@ -388,7 +388,7 @@ func mergeWithData(t *testing.T, retries int64) { } gArgs = getArgs(roachpb.Key("cccc")) - if _, pErr := client.SendWrappedWith(ctx, store2, roachpb.Header{ + if _, pErr := kv.SendWrappedWith(ctx, store2, roachpb.Header{ RangeID: rhsDesc.RangeID, }, gArgs); !testutils.IsPError( pErr, `r2 was not found`, @@ -447,7 +447,7 @@ func mergeCheckingTimestampCaches(t *testing.T, disjointLeaseholders bool) { // Write a key to the RHS. rhsKey := roachpb.Key("c") - if _, pErr := client.SendWrappedWith(ctx, rhsStore, roachpb.Header{ + if _, pErr := kv.SendWrappedWith(ctx, rhsStore, roachpb.Header{ RangeID: rhsDesc.RangeID, }, incrementArgs(rhsKey, 1)); pErr != nil { t.Fatal(pErr) @@ -487,7 +487,7 @@ func mergeCheckingTimestampCaches(t *testing.T, disjointLeaseholders bool) { // Merge the RHS back into the LHS. args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, lhsStore.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, lhsStore.TestSender(), args); pErr != nil { t.Fatal(pErr) } @@ -599,7 +599,7 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) { distSender := mtc.distSenders[0] for _, key := range []roachpb.Key{roachpb.Key("a"), roachpb.Key("b")} { - if _, pErr := client.SendWrapped(ctx, distSender, adminSplitArgs(key)); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, adminSplitArgs(key)); pErr != nil { t.Fatal(pErr) } } @@ -621,7 +621,7 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) { // with our precise clock management on s2, s3, and s4. // Write a key to [b, Max). - if _, pErr := client.SendWrapped(ctx, distSender, incrementArgs(rhsKey, 1)); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, incrementArgs(rhsKey, 1)); pErr != nil { t.Fatal(pErr) } @@ -632,7 +632,7 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) { // Merge [a, b) and [b, Max). Our request filter above will intercept the // merge and execute a read with a large timestamp immediately before the // Subsume request executes. 
- if _, pErr := client.SendWrappedWith(ctx, mtc.Store(2), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(ctx, mtc.Store(2), roachpb.Header{ RangeID: lhsRangeID, }, adminMergeArgs(roachpb.Key("a"))); pErr != nil { t.Fatal(pErr) @@ -683,7 +683,7 @@ func TestStoreRangeMergeLastRange(t *testing.T) { store := mtc.Store(0) // Merge last range. - _, pErr := client.SendWrapped(ctx, store.TestSender(), adminMergeArgs(roachpb.KeyMin)) + _, pErr := kv.SendWrapped(ctx, store.TestSender(), adminMergeArgs(roachpb.KeyMin)) if !testutils.IsPError(pErr, "cannot merge final range") { t.Fatalf("expected 'cannot merge final range' error; got %s", pErr) } @@ -739,7 +739,7 @@ func TestStoreRangeMergeTxnFailure(t *testing.T) { {lhsDesc.RangeID, roachpb.Key("aa")}, {rhsDesc.RangeID, roachpb.Key("cc")}, } { - if reply, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ + if reply, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ RangeID: tc.rangeID, }, getArgs(tc.key)); pErr != nil { t.Fatal(pErr) @@ -754,7 +754,7 @@ func TestStoreRangeMergeTxnFailure(t *testing.T) { attemptMerge := func() { t.Helper() args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store.TestSender(), args) if exp := "injected permafail"; !testutils.IsPError(pErr, exp) { t.Fatalf("expected %q error, but got %q", exp, pErr) } @@ -890,22 +890,22 @@ func TestStoreRangeMergeStats(t *testing.T) { // will leave a record on the RHS, and txn3 will leave a record on both. This // tests whether the merge code properly accounts for merging abort span // records for the same transaction. - txn1 := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + txn1 := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) if err := txn1.Put(ctx, "a-txn1", "val"); err != nil { t.Fatal(err) } - txn2 := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + txn2 := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) if err := txn2.Put(ctx, "c-txn2", "val"); err != nil { t.Fatal(err) } - txn3 := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + txn3 := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) if err := txn3.Put(ctx, "a-txn3", "val"); err != nil { t.Fatal(err) } if err := txn3.Put(ctx, "c-txn3", "val"); err != nil { t.Fatal(err) } - hiPriTxn := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + hiPriTxn := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) hiPriTxn.TestingSetPriority(enginepb.MaxTxnPriority) for _, key := range []string{"a-txn1", "c-txn2", "a-txn3", "c-txn3"} { if err := hiPriTxn.Put(ctx, key, "val"); err != nil { @@ -942,7 +942,7 @@ func TestStoreRangeMergeStats(t *testing.T) { // Merge the b range back into the a range. args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - if _, err := client.SendWrapped(ctx, store.TestSender(), args); err != nil { + if _, err := kv.SendWrapped(ctx, store.TestSender(), args); err != nil { t.Fatal(err) } replMerged := store.LookupReplica(lhsDesc.StartKey) @@ -992,7 +992,7 @@ func TestStoreRangeMergeInFlightTxns(t *testing.T) { } lhsKey, rhsKey := roachpb.Key("aa"), roachpb.Key("cc") - txn := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) // Put the key on the RHS side first so ownership of the transaction record // will need to transfer to the LHS range during the merge. 
if err := txn.Put(ctx, rhsKey, t.Name()); err != nil { @@ -1002,7 +1002,7 @@ func TestStoreRangeMergeInFlightTxns(t *testing.T) { t.Fatal(err) } args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), args); pErr != nil { t.Fatal(pErr) } if err := txn.Commit(ctx); err != nil { @@ -1030,7 +1030,7 @@ func TestStoreRangeMergeInFlightTxns(t *testing.T) { // Create a transaction that will be aborted before the merge but won't // realize until after the merge. - txn1 := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + txn1 := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) // Put the key on the RHS side so ownership of the transaction record and // abort span records will need to transfer to the LHS during the merge. if err := txn1.Put(ctx, rhsKey, t.Name()); err != nil { @@ -1038,7 +1038,7 @@ func TestStoreRangeMergeInFlightTxns(t *testing.T) { } // Create and commit a txn that aborts txn1. - txn2 := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + txn2 := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) txn2.TestingSetPriority(enginepb.MaxTxnPriority) if err := txn2.Put(ctx, rhsKey, "muhahahah"); err != nil { t.Fatal(err) @@ -1049,7 +1049,7 @@ func TestStoreRangeMergeInFlightTxns(t *testing.T) { // Complete the merge. args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), args); pErr != nil { t.Fatal(pErr) } expErr := "TransactionAbortedError(ABORT_REASON_ABORT_SPAN)" @@ -1077,7 +1077,7 @@ func TestStoreRangeMergeInFlightTxns(t *testing.T) { defer txnwait.TestingOverrideTxnLivenessThreshold(2 * testutils.DefaultSucceedsSoonDuration) // Create a transaction that won't complete until after the merge. - txn1 := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + txn1 := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) // Put the key on the RHS side so ownership of the transaction record and // abort span records will need to transfer to the LHS during the merge. if err := txn1.Put(ctx, rhsKey, t.Name()); err != nil { @@ -1085,7 +1085,7 @@ func TestStoreRangeMergeInFlightTxns(t *testing.T) { } // Create a txn that will conflict with txn1. - txn2 := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + txn2 := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) txn2ErrCh := make(chan error) go func() { // Get should block on txn1's intent until txn1 commits. @@ -1118,7 +1118,7 @@ func TestStoreRangeMergeInFlightTxns(t *testing.T) { // Complete the merge. 
args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), args); pErr != nil { t.Fatal(pErr) } @@ -1202,12 +1202,12 @@ func TestStoreRangeMergeSplitRace_MergeWins(t *testing.T) { go func() { time.Sleep(10 * time.Millisecond) splitArgs := adminSplitArgs(rhsDesc.StartKey.AsRawKey().Next()) - _, pErr := client.SendWrapped(ctx, distSender, splitArgs) + _, pErr := kv.SendWrapped(ctx, distSender, splitArgs) splitErrCh <- pErr.GoError() }() mergeArgs := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, distSender, mergeArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, mergeArgs); pErr != nil { t.Fatal(pErr) } @@ -1282,7 +1282,7 @@ func TestStoreRangeMergeSplitRace_SplitWins(t *testing.T) { // If this is the first merge attempt, launch the split // before the merge's first write succeeds. if atomic.CompareAndSwapInt64(&launchSplit, 1, 0) { - _, pErr := client.SendWrapped(ctx, distSender, adminSplitArgs(roachpb.Key("c"))) + _, pErr := kv.SendWrapped(ctx, distSender, adminSplitArgs(roachpb.Key("c"))) return pErr } // Otherwise, record that the merge retried and proceed. @@ -1306,7 +1306,7 @@ func TestStoreRangeMergeSplitRace_SplitWins(t *testing.T) { atomic.StoreInt64(&launchSplit, 1) mergeArgs := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, distSender, mergeArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, mergeArgs); pErr != nil { t.Fatal(pErr) } if atomic.LoadInt64(&mergeRetries) == 0 { @@ -1390,7 +1390,7 @@ func TestStoreRangeMergeRHSLeaseExpiration(t *testing.T) { mergeErr := make(chan error) go func() { args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, mtc.stores[0].TestSender(), args) + _, pErr := kv.SendWrapped(ctx, mtc.stores[0].TestSender(), args) mergeErr <- pErr.GoError() }() @@ -1411,7 +1411,7 @@ func TestStoreRangeMergeRHSLeaseExpiration(t *testing.T) { // which this test is not designed to handle. If the merge transaction did // abort then the get requests could complete on r2 before the merge retried. 
hb, hbH := heartbeatArgs(mergeTxn, mtc.clock.Now()) - if _, pErr := client.SendWrappedWith(ctx, mtc.stores[0].TestSender(), hbH, hb); pErr != nil { + if _, pErr := kv.SendWrappedWith(ctx, mtc.stores[0].TestSender(), hbH, hb); pErr != nil { t.Fatal(pErr) } @@ -1464,7 +1464,7 @@ func TestStoreRangeMergeRHSLeaseExpiration(t *testing.T) { } else { req = putArgs(rhsSentinel, []byte(fmt.Sprintf("val%d", i))) } - _, pErr := client.SendWrappedWith(ctx, mtc.stores[0].TestSender(), roachpb.Header{ + _, pErr := kv.SendWrappedWith(ctx, mtc.stores[0].TestSender(), roachpb.Header{ RangeID: rhsDesc.RangeID, }, req) reqErrs <- pErr.GoError() @@ -1596,7 +1596,7 @@ func TestStoreRangeMergeConcurrentRequests(t *testing.T) { t.Fatal(err) } args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), args); pErr != nil { t.Fatal(pErr) } } @@ -1668,7 +1668,7 @@ func TestStoreReplicaGCAfterMerge(t *testing.T) { mtc.unreplicateRange(rhsDesc.RangeID, 1) args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store0.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store0.TestSender(), args) if pErr != nil { t.Fatal(pErr) } @@ -1900,7 +1900,7 @@ func TestStoreRangeMergeSlowUnabandonedFollower_NoSplit(t *testing.T) { lhsRepl2.RaftLock() args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store0.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store0.TestSender(), args) if pErr != nil { t.Fatal(pErr) } @@ -1957,7 +1957,7 @@ func TestStoreRangeMergeSlowUnabandonedFollower_WithSplit(t *testing.T) { }) args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store0.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store0.TestSender(), args) if pErr != nil { t.Fatal(pErr) } @@ -2029,7 +2029,7 @@ func TestStoreRangeMergeSlowAbandonedFollower(t *testing.T) { lhsRepl2.RaftLock() args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store0.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store0.TestSender(), args) if pErr != nil { t.Fatal(pErr) } @@ -2096,7 +2096,7 @@ func TestStoreRangeMergeAbandonedFollowers(t *testing.T) { keys := []roachpb.RKey{roachpb.RKey("a"), roachpb.RKey("b"), roachpb.RKey("c")} for _, key := range keys { splitArgs := adminSplitArgs(key.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], splitArgs); pErr != nil { t.Fatal(pErr) } } @@ -2122,7 +2122,7 @@ func TestStoreRangeMergeAbandonedFollowers(t *testing.T) { // Merge all three ranges together. store2 won't hear about this merge. for i := 0; i < 2; i++ { - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], adminMergeArgs(roachpb.Key("a"))); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], adminMergeArgs(roachpb.Key("a"))); pErr != nil { t.Fatal(pErr) } } @@ -2222,7 +2222,7 @@ func TestStoreRangeMergeAbandonedFollowersAutomaticallyGarbageCollected(t *testi // goroutine will, however, notice the merge and mark the RHS replica as // destroyed with reason destroyReasonMergePending. 
args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store0.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store0.TestSender(), args) if pErr != nil { t.Fatal(pErr) } @@ -2268,7 +2268,7 @@ func TestStoreRangeMergeDeadFollowerBeforeTxn(t *testing.T) { mtc.stopStore(2) args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store0.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store0.TestSender(), args) expErr := "waiting for all left-hand replicas to initialize" if !testutils.IsPError(pErr, expErr) { t.Fatalf("expected %q error, but got %v", expErr, pErr) @@ -2301,7 +2301,7 @@ func TestStoreRangeMergeDeadFollowerDuringTxn(t *testing.T) { } args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store0.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store0.TestSender(), args) expErr := "merge failed: waiting for all right-hand replicas to catch up" if !testutils.IsPError(pErr, expErr) { t.Fatalf("expected %q error, but got %v", expErr, pErr) @@ -2351,7 +2351,7 @@ func TestStoreRangeReadoptedLHSFollower(t *testing.T) { if withMerge { // Merge the two ranges together. args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store0.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store0.TestSender(), args) if pErr != nil { t.Fatal(pErr) } @@ -2463,7 +2463,7 @@ func TestStoreRangeMergeUninitializedLHSFollower(t *testing.T) { split := func(key roachpb.RKey) roachpb.RangeID { t.Helper() - if _, pErr := client.SendWrapped(ctx, distSender, adminSplitArgs(key.AsRawKey())); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, adminSplitArgs(key.AsRawKey())); pErr != nil { t.Fatal(pErr) } return store0.LookupReplica(key).RangeID @@ -2537,7 +2537,7 @@ func TestStoreRangeMergeUninitializedLHSFollower(t *testing.T) { // Launch the merge of A and B. mergeErr := make(chan error) go func() { - _, pErr := client.SendWrapped(ctx, distSender, adminMergeArgs(aKey.AsRawKey())) + _, pErr := kv.SendWrapped(ctx, distSender, adminMergeArgs(aKey.AsRawKey())) mergeErr <- pErr.GoError() }() @@ -2667,7 +2667,7 @@ func testMergeWatcher(t *testing.T, injectFailures bool) { lhsRepl2.RaftLock() args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := client.SendWrapped(ctx, store0.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store0.TestSender(), args) if pErr != nil { t.Fatal(pErr) } @@ -2679,7 +2679,7 @@ func testMergeWatcher(t *testing.T, injectFailures bool) { // and will notice that the merge has committed before the LHS does. getErr := make(chan error) go func() { - _, pErr = client.SendWrappedWith(ctx, store2.TestSender(), roachpb.Header{ + _, pErr = kv.SendWrappedWith(ctx, store2.TestSender(), roachpb.Header{ RangeID: rhsDesc.RangeID, }, getArgs(rhsDesc.StartKey.AsRawKey())) getErr <- pErr.GoError() @@ -2782,7 +2782,7 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { keys := []roachpb.RKey{aKey, bKey, cKey} for _, key := range keys { splitArgs := adminSplitArgs(key.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], splitArgs); pErr != nil { t.Fatal(pErr) } } @@ -2794,7 +2794,7 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { // to B while its blocked because of a stale DistSender cache. 
for _, key := range keys { for _, distSender := range mtc.distSenders { - if _, pErr := client.SendWrapped(ctx, distSender, getArgs(key.AsRawKey())); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, getArgs(key.AsRawKey())); pErr != nil { t.Fatal(pErr) } } @@ -2809,27 +2809,27 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { // Merge A <- B. mergeArgs := adminMergeArgs(aKey.AsRawKey()) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], mergeArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], mergeArgs); pErr != nil { t.Fatal(pErr) } // Immediately after the merge completes, send a request to B. getErr := make(chan error) go func() { - _, pErr := client.SendWrappedWith(ctx, store1.TestSender(), roachpb.Header{ + _, pErr := kv.SendWrappedWith(ctx, store1.TestSender(), roachpb.Header{ RangeID: bRangeID, }, getArgs(bKey.AsRawKey())) getErr <- pErr.GoError() }() // Merge AB <- C. - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], mergeArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], mergeArgs); pErr != nil { t.Fatal(pErr) } // Synchronously ensure that the intent on meta2CKey has been cleaned up. // The merge committed, but the intent resolution happens asynchronously. - _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], getArgs(meta2CKey)) + _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], getArgs(meta2CKey)) if pErr != nil { t.Fatal(pErr) } @@ -3038,7 +3038,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { // Create three fully-caught-up, adjacent ranges on all three stores. mtc.replicateRange(roachpb.RangeID(1), 1, 2) for _, key := range []roachpb.Key{roachpb.Key("a"), roachpb.Key("b"), roachpb.Key("c")} { - if _, pErr := client.SendWrapped(ctx, distSender, adminSplitArgs(key)); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, adminSplitArgs(key)); pErr != nil { t.Fatal(pErr) } // Manually send the request so we can store the timestamp. @@ -3059,7 +3059,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { // clear the keys in [d, /Max). for i := 0; i < 10; i++ { key := roachpb.Key("d" + strconv.Itoa(i)) - if _, pErr := client.SendWrapped(ctx, distSender, incrementArgs(key, 1)); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, incrementArgs(key, 1)); pErr != nil { t.Fatal(pErr) } mtc.waitForValues(key, []int64{1, 1, 1}) @@ -3068,7 +3068,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { // Split [d, /Max) into [d, e) and [e, /Max) so we can predict the // contents of [a, d) without having to worry about metadata keys that will // now be in [e, /Max) instead. - if _, pErr := client.SendWrapped(ctx, distSender, adminSplitArgs(roachpb.Key("e"))); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, adminSplitArgs(roachpb.Key("e"))); pErr != nil { t.Fatal(pErr) } @@ -3082,14 +3082,14 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { // Merge [a, b) into [b, c), then [a, c) into [c, /Max). for i := 0; i < 2; i++ { - if _, pErr := client.SendWrapped(ctx, distSender, adminMergeArgs(roachpb.Key("a"))); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, adminMergeArgs(roachpb.Key("a"))); pErr != nil { t.Fatal(pErr) } } // Split [a, /Max) into [a, d) and [d, /Max). This means the Raft snapshot // will span both a merge and a split. 
- if _, pErr := client.SendWrapped(ctx, distSender, adminSplitArgs(roachpb.Key("d"))); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, adminSplitArgs(roachpb.Key("d"))); pErr != nil { t.Fatal(pErr) } @@ -3107,7 +3107,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { Index: index, RangeID: repl.RangeID, } - if _, err := client.SendWrapped(ctx, mtc.distSenders[0], truncArgs); err != nil { + if _, err := kv.SendWrapped(ctx, mtc.distSenders[0], truncArgs); err != nil { t.Fatal(err) } return index @@ -3241,7 +3241,7 @@ func TestStoreRangeMergeDuringShutdown(t *testing.T) { // Simulate a merge transaction by launching a transaction that lays down // intents on the two copies of the RHS range descriptor. - txn := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + txn := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) if err := txn.Del(ctx, keys.RangeDescriptorKey(rhsDesc.StartKey)); err != nil { t.Fatal(err) } @@ -3302,13 +3302,13 @@ func TestMergeQueue(t *testing.T) { t.Helper() args := adminSplitArgs(key) args.ExpirationTime = expirationTime - if _, pErr := client.SendWrapped(ctx, store.DB().NonTransactionalSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.DB().NonTransactionalSender(), args); pErr != nil { t.Fatal(pErr) } } clearRange := func(t *testing.T, start, end roachpb.RKey) { - if _, pErr := client.SendWrapped(ctx, store.DB().NonTransactionalSender(), &roachpb.ClearRangeRequest{ + if _, pErr := kv.SendWrapped(ctx, store.DB().NonTransactionalSender(), &roachpb.ClearRangeRequest{ RequestHeader: roachpb.RequestHeader{Key: start.AsRawKey(), EndKey: end.AsRawKey()}, }); pErr != nil { t.Fatal(pErr) @@ -3443,7 +3443,7 @@ func TestMergeQueue(t *testing.T) { Key: rhsStartKey.AsRawKey(), }, } - if _, err := client.SendWrapped(ctx, store.DB().NonTransactionalSender(), unsplitArgs); err != nil { + if _, err := kv.SendWrapped(ctx, store.DB().NonTransactionalSender(), unsplitArgs); err != nil { t.Fatal(err) } store.MustForceMergeScanAndProcess() @@ -3506,7 +3506,7 @@ func TestInvalidSubsumeRequest(t *testing.T) { badRHSDesc.EndKey = badRHSDesc.EndKey.Next() badArgs := getSnapArgs badArgs.RightDesc = badRHSDesc - _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ + _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ RangeID: rhsDesc.RangeID, }, &badArgs) if exp := "RHS range bounds do not match"; !testutils.IsPError(pErr, exp) { @@ -3518,7 +3518,7 @@ func TestInvalidSubsumeRequest(t *testing.T) { { badArgs := getSnapArgs badArgs.LeftDesc.EndKey = badArgs.LeftDesc.EndKey.Next() - _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ + _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ RangeID: rhsDesc.RangeID, }, &badArgs) if exp := "ranges are not adjacent"; !testutils.IsPError(pErr, exp) { @@ -3527,7 +3527,7 @@ func TestInvalidSubsumeRequest(t *testing.T) { } // Subsume without an intent on the local range descriptor should fail. - _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ + _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ RangeID: rhsDesc.RangeID, }, &getSnapArgs) if exp := "range missing intent on its local descriptor"; !testutils.IsPError(pErr, exp) { @@ -3536,13 +3536,13 @@ func TestInvalidSubsumeRequest(t *testing.T) { // Subsume when a non-deletion intent is present on the // local range descriptor should fail. 
- err = store.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = store.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, keys.RangeDescriptorKey(rhsDesc.StartKey), "garbage"); err != nil { return err } // NB: Subsume intentionally takes place outside of the txn so // that it sees an intent rather than the value the txn just wrote. - _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ + _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ RangeID: rhsDesc.RangeID, }, &getSnapArgs) if exp := "non-deletion intent on local range descriptor"; !testutils.IsPError(pErr, exp) { @@ -3578,7 +3578,7 @@ func BenchmarkStoreRangeMerge(b *testing.B) { for i := 0; i < b.N; i++ { // Merge the ranges. b.StartTimer() - if _, err := client.SendWrapped(ctx, store.TestSender(), mArgs); err != nil { + if _, err := kv.SendWrapped(ctx, store.TestSender(), mArgs); err != nil { b.Fatal(err) } diff --git a/pkg/kv/kvserver/client_metrics_test.go b/pkg/kv/kvserver/client_metrics_test.go index a50eb39a29e9..a7a5ad2c7546 100644 --- a/pkg/kv/kvserver/client_metrics_test.go +++ b/pkg/kv/kvserver/client_metrics_test.go @@ -16,8 +16,8 @@ import ( "sync" "testing" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -265,7 +265,7 @@ func TestStoreMetrics(t *testing.T) { // Perform a split, which has special metrics handling. splitArgs := adminSplitArgs(roachpb.Key("m")) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); err != nil { t.Fatal(err) } @@ -293,7 +293,7 @@ func TestStoreMetrics(t *testing.T) { verifyStats(t, mtc, 0, 1, 2) // Create a transaction statement that fails. Regression test for #4969. - if err := mtc.dbs[0].Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := mtc.dbs[0].Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() var expVal roachpb.Value expVal.SetInt(6) diff --git a/pkg/kv/kvserver/client_raft_log_queue_test.go b/pkg/kv/kvserver/client_raft_log_queue_test.go index cec0476a1be7..5b380e2e6459 100644 --- a/pkg/kv/kvserver/client_raft_log_queue_test.go +++ b/pkg/kv/kvserver/client_raft_log_queue_test.go @@ -17,7 +17,7 @@ import ( "math" "testing" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/gogo/protobuf/proto" @@ -47,7 +47,7 @@ func TestRaftLogQueue(t *testing.T) { // Write a single value to ensure we have a leader. 
pArgs := putArgs([]byte("key"), []byte("value")) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil { t.Fatal(err) } @@ -71,7 +71,7 @@ func TestRaftLogQueue(t *testing.T) { value := bytes.Repeat([]byte("a"), 1000) // 1KB for size := int64(0); size < 2*maxBytes; size += int64(len(value)) { pArgs = putArgs([]byte(fmt.Sprintf("key-%d", size)), value) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil { t.Fatal(err) } } diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go index a86a32815f4a..0366eff534cd 100644 --- a/pkg/kv/kvserver/client_raft_test.go +++ b/pkg/kv/kvserver/client_raft_test.go @@ -26,8 +26,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" @@ -94,7 +94,7 @@ func TestStoreRecoverFromEngine(t *testing.T) { get := func(store *kvserver.Store, rangeID roachpb.RangeID, key roachpb.Key) int64 { args := getArgs(key) - resp, err := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + resp, err := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ RangeID: rangeID, }, args) if err != nil { @@ -128,7 +128,7 @@ func TestStoreRecoverFromEngine(t *testing.T) { increment := func(rangeID roachpb.RangeID, key roachpb.Key, value int64) (*roachpb.IncrementResponse, *roachpb.Error) { args := incrementArgs(key, value) - resp, err := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + resp, err := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ RangeID: rangeID, }, args) incResp, _ := resp.(*roachpb.IncrementResponse) @@ -142,7 +142,7 @@ func TestStoreRecoverFromEngine(t *testing.T) { t.Fatal(err) } splitArgs := adminSplitArgs(splitKey) - if _, err := client.SendWrapped(context.Background(), store.TestSender(), splitArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), store.TestSender(), splitArgs); err != nil { t.Fatal(err) } rangeID2 = store.LookupReplica(roachpb.RKey(key2)).RangeID @@ -172,11 +172,11 @@ func TestStoreRecoverFromEngine(t *testing.T) { // Raft processing is initialized lazily; issue a no-op write request on each key to // ensure that is has been started. incArgs := incrementArgs(key1, 0) - if _, err := client.SendWrapped(context.Background(), store.TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), store.TestSender(), incArgs); err != nil { t.Fatal(err) } incArgs = incrementArgs(key2, 0) - if _, err := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if _, err := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ RangeID: rangeID2, }, incArgs); err != nil { t.Fatal(err) @@ -219,14 +219,14 @@ func TestStoreRecoverWithErrors(t *testing.T) { // Write a bytes value so the increment will fail. 
putArgs := putArgs(keyA, []byte("asdf")) - if _, err := client.SendWrapped(context.Background(), store.TestSender(), putArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), store.TestSender(), putArgs); err != nil { t.Fatal(err) } // Try and fail to increment the key. It is important for this test that the // failure be the last thing in the raft log when the store is stopped. incArgs := incrementArgs(keyA, 42) - if _, err := client.SendWrapped(context.Background(), store.TestSender(), incArgs); err == nil { + if _, err := kv.SendWrapped(context.Background(), store.TestSender(), incArgs); err == nil { t.Fatal("did not get expected error") } }() @@ -250,7 +250,7 @@ func TestStoreRecoverWithErrors(t *testing.T) { // Issue a no-op write to lazily initialize raft on the range. keyB := roachpb.Key("b") incArgs := incrementArgs(keyB, 0) - if _, err := client.SendWrapped(context.Background(), store.TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), store.TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -275,7 +275,7 @@ func TestReplicateRange(t *testing.T) { // Issue a command on the first node before replicating. incArgs := incrementArgs([]byte("a"), 5) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -323,7 +323,7 @@ func TestReplicateRange(t *testing.T) { // Verify that the same data is available on the replica. testutils.SucceedsSoon(t, func() error { getArgs := getArgs([]byte("a")) - if reply, err := client.SendWrappedWith(context.Background(), mtc.stores[1].TestSender(), roachpb.Header{ + if reply, err := kv.SendWrappedWith(context.Background(), mtc.stores[1].TestSender(), roachpb.Header{ ReadConsistency: roachpb.INCONSISTENT, }, getArgs); err != nil { return errors.Errorf("failed to read data: %s", err) @@ -366,7 +366,7 @@ func TestRestoreReplicas(t *testing.T) { // Perform an increment before replication to ensure that commands are not // repeated on restarts. incArgs := incrementArgs([]byte("a"), 23) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -383,26 +383,26 @@ func TestRestoreReplicas(t *testing.T) { // Send a command on each store. The original store (the lease holder still) // will succeed. incArgs = incrementArgs([]byte("a"), 5) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } // The follower will return a not lease holder error, indicating the command // should be forwarded to the lease holder. incArgs = incrementArgs([]byte("a"), 11) { - _, pErr := client.SendWrapped(context.Background(), mtc.stores[1].TestSender(), incArgs) + _, pErr := kv.SendWrapped(context.Background(), mtc.stores[1].TestSender(), incArgs) if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok { t.Fatalf("expected not lease holder error; got %s", pErr) } } // Send again, this time to first store. 
- if _, pErr := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); pErr != nil { t.Fatal(pErr) } testutils.SucceedsSoon(t, func() error { getArgs := getArgs([]byte("a")) - if reply, err := client.SendWrappedWith(context.Background(), mtc.stores[1].TestSender(), roachpb.Header{ + if reply, err := kv.SendWrappedWith(context.Background(), mtc.stores[1].TestSender(), roachpb.Header{ ReadConsistency: roachpb.INCONSISTENT, }, getArgs); err != nil { return errors.Errorf("failed to read data: %s", err) @@ -519,7 +519,7 @@ func TestReplicateAfterTruncation(t *testing.T) { // Issue a command on the first node before replicating. incArgs := incrementArgs([]byte("a"), 5) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -532,13 +532,13 @@ func TestReplicateAfterTruncation(t *testing.T) { // Truncate the log at index+1 (log entries < N are removed, so this includes // the increment). truncArgs := truncateLogArgs(index+1, 1) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { t.Fatal(err) } // Issue a second command post-truncation. incArgs = incrementArgs([]byte("a"), 11) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -554,7 +554,7 @@ func TestReplicateAfterTruncation(t *testing.T) { // Once it catches up, the effects of both commands can be seen. testutils.SucceedsSoon(t, func() error { getArgs := getArgs([]byte("a")) - if reply, err := client.SendWrappedWith(context.Background(), mtc.stores[1].TestSender(), roachpb.Header{ + if reply, err := kv.SendWrappedWith(context.Background(), mtc.stores[1].TestSender(), roachpb.Header{ ReadConsistency: roachpb.INCONSISTENT, }, getArgs); err != nil { return errors.Errorf("failed to read data: %s", err) @@ -579,13 +579,13 @@ func TestReplicateAfterTruncation(t *testing.T) { // Send a third command to verify that the log states are synced up so the // new node can accept new commands. 
incArgs = incrementArgs([]byte("a"), 23) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } testutils.SucceedsSoon(t, func() error { getArgs := getArgs([]byte("a")) - if reply, err := client.SendWrappedWith(context.Background(), mtc.stores[1].TestSender(), roachpb.Header{ + if reply, err := kv.SendWrappedWith(context.Background(), mtc.stores[1].TestSender(), roachpb.Header{ ReadConsistency: roachpb.INCONSISTENT, }, getArgs); err != nil { return errors.Errorf("failed to read data: %s", err) @@ -616,7 +616,7 @@ func TestRaftLogSizeAfterTruncation(t *testing.T) { key := []byte("a") incArgs := incrementArgs(key, 5) - if _, err := client.SendWrapped( + if _, err := kv.SendWrapped( context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -653,7 +653,7 @@ func TestRaftLogSizeAfterTruncation(t *testing.T) { assert.NoError(t, assertCorrectRaftLogSize()) truncArgs := truncateLogArgs(index+1, 1) - if _, err := client.SendWrapped( + if _, err := kv.SendWrapped( context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { t.Fatal(err) } @@ -698,7 +698,7 @@ func TestSnapshotAfterTruncation(t *testing.T) { // key and truncate the raft logs from that command after killing one of the // nodes to check that it gets the new value after it comes up. incArgs := incrementArgs(key, incA) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -711,7 +711,7 @@ func TestSnapshotAfterTruncation(t *testing.T) { mtc.stopStore(stoppedStore) incArgs = incrementArgs(key, incB) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -725,7 +725,7 @@ func TestSnapshotAfterTruncation(t *testing.T) { // Truncate the log at index+1 (log entries < N are removed, so this // includes the increment). truncArgs := truncateLogArgs(index+1, 1) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { t.Fatal(err) } @@ -860,7 +860,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { // We're then going to continue modifying this key to make sure that the // temporarily partitioned node can continue to receive updates. 
incArgs := incrementArgs(key, incA) - if _, pErr := client.SendWrapped(ctx, mtc.stores[0].TestSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.stores[0].TestSender(), incArgs); pErr != nil { t.Fatal(pErr) } @@ -918,7 +918,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { cCtx, cancel := context.WithTimeout(ctx, 50*time.Millisecond) defer cancel() incArgsOther := incrementArgs(otherKey, 1) - if _, pErr := client.SendWrapped(cCtx, partReplSender, incArgsOther); pErr == nil { + if _, pErr := kv.SendWrapped(cCtx, partReplSender, incArgsOther); pErr == nil { return errors.New("unexpected success") } else if !testutils.IsPError(pErr, "context deadline exceeded") { return pErr.GoError() @@ -942,7 +942,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { incArgs = incrementArgs(key, incB) testutils.SucceedsSoon(t, func() error { mtc.advanceClock(ctx) - _, pErr := client.SendWrapped(ctx, newLeaderReplSender, incArgs) + _, pErr := kv.SendWrapped(ctx, newLeaderReplSender, incArgs) if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); ok { return pErr.GoError() } else if pErr != nil { @@ -962,7 +962,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { truncArgs := truncateLogArgs(index+1, 1) testutils.SucceedsSoon(t, func() error { mtc.advanceClock(ctx) - _, pErr := client.SendWrapped(ctx, newLeaderReplSender, truncArgs) + _, pErr := kv.SendWrapped(ctx, newLeaderReplSender, truncArgs) if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); ok { return pErr.GoError() } else if pErr != nil { @@ -1007,7 +1007,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { // Perform another write. The partitioned replica should be able to receive // replicated updates. incArgs = incrementArgs(key, incC) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } mtc.waitForValues(key, []int64{incABC, incABC, incABC}) @@ -1109,7 +1109,7 @@ func TestConcurrentRaftSnapshots(t *testing.T) { // key and truncate the raft logs from that command after killing one of the // nodes to check that it gets the new value after it comes up. incArgs := incrementArgs(key, incA) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -1123,7 +1123,7 @@ func TestConcurrentRaftSnapshots(t *testing.T) { mtc.stopStore(2) incArgs = incrementArgs(key, incB) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -1137,7 +1137,7 @@ func TestConcurrentRaftSnapshots(t *testing.T) { // Truncate the log at index+1 (log entries < N are removed, so this // includes the increment). truncArgs := truncateLogArgs(index+1, 1) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { t.Fatal(err) } mtc.restartStore(1) @@ -1278,7 +1278,7 @@ func TestRefreshPendingCommands(t *testing.T) { // Put some data in the range so we'll have something to test for. 
incArgs := incrementArgs([]byte("a"), 5) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -1288,7 +1288,7 @@ func TestRefreshPendingCommands(t *testing.T) { // Stop node 2; while it is down write some more data. mtc.stopStore(2) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -1305,7 +1305,7 @@ func TestRefreshPendingCommands(t *testing.T) { // Truncate the log at index+1 (log entries < N are removed, so this includes // the increment). truncArgs := truncateLogArgs(index+1, rangeID) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { t.Fatal(err) } @@ -1370,7 +1370,7 @@ func TestRefreshPendingCommands(t *testing.T) { // Send an increment to the restarted node. If we don't refresh pending // commands appropriately, the range lease command will not get // re-proposed when we discover the new leader. - if _, err := client.SendWrapped(context.Background(), mtc.stores[2].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[2].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -1427,7 +1427,7 @@ func TestLogGrowthWhenRefreshingPendingCommands(t *testing.T) { // Put some data in the range so we'll have something to test for. incArgs := incrementArgs([]byte("a"), 5) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -1482,7 +1482,7 @@ func TestLogGrowthWhenRefreshingPendingCommands(t *testing.T) { putRes := make(chan *roachpb.Error) go func() { putArgs := putArgs([]byte("b"), make([]byte, sc.RaftMaxUncommittedEntriesSize/8)) - _, err := client.SendWrapped(context.Background(), propNode, putArgs) + _, err := kv.SendWrapped(context.Background(), propNode, putArgs) putRes <- err }() @@ -1735,7 +1735,7 @@ func TestProgressWithDownNode(t *testing.T) { mtc.replicateRange(rangeID, 1, 2) incArgs := incrementArgs([]byte("a"), 5) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -1762,7 +1762,7 @@ func TestProgressWithDownNode(t *testing.T) { // Stop one of the replicas and issue a new increment. mtc.stopStore(1) incArgs = incrementArgs([]byte("a"), 11) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -1821,7 +1821,7 @@ func runReplicateRestartAfterTruncation(t *testing.T, removeBeforeTruncateAndReA // Verify that the first increment propagates to all the engines. 
incArgs := incrementArgs(key, 2) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } mtc.waitForValues(key, []int64{2, 2, 2}) @@ -1847,14 +1847,14 @@ func runReplicateRestartAfterTruncation(t *testing.T, removeBeforeTruncateAndReA // Truncate the log at index+1 (log entries < N are removed, so this includes // the increment). truncArgs := truncateLogArgs(index+1, rangeID) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), truncArgs); err != nil { t.Fatal(err) } } // Ensure that store can catch up with the rest of the group. incArgs = incrementArgs(key, 3) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -1936,7 +1936,7 @@ func testReplicaAddRemove(t *testing.T, addFirst bool) { inc1 := int64(5) { incArgs := incrementArgs(key, inc1) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } } @@ -1970,7 +1970,7 @@ func testReplicaAddRemove(t *testing.T, addFirst bool) { inc2 := int64(11) { incArgs := incrementArgs(key, inc2) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } } @@ -1990,7 +1990,7 @@ func testReplicaAddRemove(t *testing.T, addFirst bool) { inc3 := int64(23) { incArgs := incrementArgs(key, inc3) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } } @@ -2375,7 +2375,7 @@ func TestReportUnreachableHeartbeats(t *testing.T) { // Send a command to ensure Raft is aware of lost follower so that it won't // quiesce (which would prevent heartbeats). - if _, err := client.SendWrappedWith( + if _, err := kv.SendWrappedWith( context.Background(), mtc.stores[0].TestSender(), roachpb.Header{RangeID: rangeID}, incrementArgs(roachpb.Key("a"), 1)); err != nil { t.Fatal(err) @@ -2464,7 +2464,7 @@ func TestReplicateAfterSplit(t *testing.T) { store0 := mtc.stores[0] // Make the split splitArgs := adminSplitArgs(splitKey) - if _, err := client.SendWrapped(context.Background(), store0.TestSender(), splitArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), store0.TestSender(), splitArgs); err != nil { t.Fatal(err) } @@ -2474,7 +2474,7 @@ func TestReplicateAfterSplit(t *testing.T) { } // Issue an increment for later check. 
incArgs := incrementArgs(key, 11) - if _, err := client.SendWrappedWith(context.Background(), store0.TestSender(), roachpb.Header{ + if _, err := kv.SendWrappedWith(context.Background(), store0.TestSender(), roachpb.Header{ RangeID: rangeID2, }, incArgs); err != nil { t.Fatal(err) @@ -2489,7 +2489,7 @@ func TestReplicateAfterSplit(t *testing.T) { testutils.SucceedsSoon(t, func() error { getArgs := getArgs(key) // Reading on non-lease holder replica should use inconsistent read - if reply, err := client.SendWrappedWith(context.Background(), mtc.stores[1].TestSender(), roachpb.Header{ + if reply, err := kv.SendWrappedWith(context.Background(), mtc.stores[1].TestSender(), roachpb.Header{ RangeID: rangeID2, ReadConsistency: roachpb.INCONSISTENT, }, getArgs); err != nil { @@ -2540,14 +2540,14 @@ func TestReplicaRemovalCampaign(t *testing.T) { // Make the split. splitArgs := adminSplitArgs(splitKey) - if _, err := client.SendWrapped(context.Background(), store0.TestSender(), splitArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), store0.TestSender(), splitArgs); err != nil { t.Fatal(err) } replica2 := store0.LookupReplica(roachpb.RKey(key2)) - rg2 := func(s *kvserver.Store) client.Sender { - return client.Wrap(s, func(ba roachpb.BatchRequest) roachpb.BatchRequest { + rg2 := func(s *kvserver.Store) kv.Sender { + return kv.Wrap(s, func(ba roachpb.BatchRequest) roachpb.BatchRequest { if ba.RangeID == 0 { ba.RangeID = replica2.RangeID } @@ -2558,7 +2558,7 @@ func TestReplicaRemovalCampaign(t *testing.T) { // Raft processing is initialized lazily; issue a no-op write request to // ensure that the Raft group has been started. incArgs := incrementArgs(key2, 0) - if _, err := client.SendWrapped(context.Background(), rg2(store0), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), rg2(store0), incArgs); err != nil { t.Fatal(err) } @@ -2617,7 +2617,7 @@ func TestRaftAfterRemoveRange(t *testing.T) { // Make the split. splitArgs := adminSplitArgs(roachpb.Key("b")) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); err != nil { t.Fatal(err) } @@ -3119,7 +3119,7 @@ func TestReplicateRogueRemovedNode(t *testing.T) { // Put some data in the range so we'll have something to test for. incArgs := incrementArgs([]byte("a"), 5) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -3133,7 +3133,7 @@ func TestReplicateRogueRemovedNode(t *testing.T) { // Make a write on node 0; this will not be replicated because 0 is the only node left. 
incArgs = incrementArgs([]byte("a"), 11) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -3182,7 +3182,7 @@ func TestReplicateRogueRemovedNode(t *testing.T) { incArgs := incrementArgs([]byte("a"), 23) startWG.Done() defer finishWG.Done() - _, pErr := client.SendWrappedWith( + _, pErr := kv.SendWrappedWith( context.Background(), mtc.stores[2], roachpb.Header{ @@ -3297,7 +3297,7 @@ func TestReplicateRemovedNodeDisruptiveElection(t *testing.T) { key := roachpb.Key("a") value := int64(5) incArgs := incrementArgs(key, value) - if _, err := client.SendWrapped(context.Background(), mtc.distSenders[1], incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.distSenders[1], incArgs); err != nil { t.Fatal(err) } @@ -3312,7 +3312,7 @@ func TestReplicateRemovedNodeDisruptiveElection(t *testing.T) { Key: roachpb.KeyMin, }, } - reply, pErr := client.SendWrapped(context.Background(), mtc.distSenders[1], req) + reply, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[1], req) if pErr != nil { return pErr.GoError() } @@ -3425,7 +3425,7 @@ func TestReplicaTooOldGC(t *testing.T) { // Put some data in the range so we'll have something to test for. incArgs := incrementArgs([]byte("a"), 5) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } // Wait for all nodes to catch up. @@ -3443,7 +3443,7 @@ func TestReplicaTooOldGC(t *testing.T) { // Perform another write. incArgs = incrementArgs([]byte("a"), 11) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } mtc.waitForValues(roachpb.Key("a"), []int64{16, 16, 16, 5}) @@ -3504,7 +3504,7 @@ func TestReplicaLazyLoad(t *testing.T) { // gossip system table data. splitKey := keys.UserTableDataMin splitArgs := adminSplitArgs(splitKey) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); err != nil { t.Fatal(err) } @@ -3551,7 +3551,7 @@ func TestReplicateReAddAfterDown(t *testing.T) { // Put some data in the range so we'll have something to test for. incArgs := incrementArgs([]byte("a"), 5) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } @@ -3565,7 +3565,7 @@ func TestReplicateReAddAfterDown(t *testing.T) { // Perform another write. incArgs = incrementArgs([]byte("a"), 11) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } mtc.waitForValues(roachpb.Key("a"), []int64{16, 16, 5}) @@ -3606,7 +3606,7 @@ func TestLeaseHolderRemoveSelf(t *testing.T) { // Expect that we can still successfully do a get on the range. 
getArgs := getArgs([]byte("a")) - _, pErr := client.SendWrappedWith(context.Background(), leaseHolder.TestSender(), roachpb.Header{}, getArgs) + _, pErr := kv.SendWrappedWith(context.Background(), leaseHolder.TestSender(), roachpb.Header{}, getArgs) if pErr != nil { t.Fatal(pErr) } @@ -3646,7 +3646,7 @@ func TestRemovedReplicaError(t *testing.T) { // start seeing the RangeNotFoundError after a little bit of time has passed. getArgs := getArgs([]byte("a")) testutils.SucceedsSoon(t, func() error { - _, pErr := client.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{}, getArgs) + _, pErr := kv.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{}, getArgs) switch pErr.GetDetail().(type) { case *roachpb.AmbiguousResultError: return pErr.GoError() @@ -3695,7 +3695,7 @@ func TestTransferRaftLeadership(t *testing.T) { { // Split off a range to avoid interacting with the initial splits. splitArgs := adminSplitArgs(key) - if _, err := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); err != nil { t.Fatal(err) } } @@ -3720,7 +3720,7 @@ func TestTransferRaftLeadership(t *testing.T) { } getArgs := getArgs([]byte("a")) - if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( context.Background(), store0, roachpb.Header{RangeID: repl0.RangeID}, getArgs, ); pErr != nil { t.Fatalf("expect get nil, actual get %v ", pErr) @@ -3737,7 +3737,7 @@ func TestTransferRaftLeadership(t *testing.T) { origCount0 := store0.Metrics().RangeRaftLeaderTransfers.Count() for { mtc.advanceClock(context.TODO()) - if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( context.Background(), store1, roachpb.Header{RangeID: repl0.RangeID}, getArgs, ); pErr == nil { break @@ -3780,7 +3780,7 @@ func TestRaftBlockedReplica(t *testing.T) { // Create 2 ranges by splitting range 1. splitArgs := adminSplitArgs(roachpb.Key("b")) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); err != nil { t.Fatal(err) } @@ -3816,7 +3816,7 @@ func TestRaftBlockedReplica(t *testing.T) { // Verify we can still perform operations on the non-blocked replica. incArgs := incrementArgs([]byte("a"), 5) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); err != nil { t.Fatal(err) } mtc.waitForValues(roachpb.Key("a"), []int64{5, 5, 5}) @@ -3921,7 +3921,7 @@ func TestInitRaftGroupOnRequest(t *testing.T) { // gossip system table data. splitKey := keys.UserTableDataMin splitArgs := adminSplitArgs(splitKey) - if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); err != nil { t.Fatal(err) } @@ -3952,7 +3952,7 @@ func TestInitRaftGroupOnRequest(t *testing.T) { // Send an increment and verify that initializes the Raft group. 
incArgs := incrementArgs(splitKey, 1) - _, pErr := client.SendWrappedWith( + _, pErr := kv.SendWrappedWith( context.Background(), mtc.stores[storeIdx], roachpb.Header{RangeID: repl.RangeID}, incArgs, ) if _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok { @@ -4127,7 +4127,7 @@ func TestStoreRangeWaitForApplication(t *testing.T) { // Split off a non-system range so we don't have to account for node liveness // traffic. splitArgs := adminSplitArgs(roachpb.Key("a")) - if _, pErr := client.SendWrapped(ctx, distSender, splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, splitArgs); pErr != nil { t.Fatal(pErr) } rangeID := store0.LookupReplica(roachpb.RKey("a")).RangeID @@ -4194,7 +4194,7 @@ func TestStoreRangeWaitForApplication(t *testing.T) { // been issued. putArgs := putArgs(roachpb.Key("foo"), []byte("bar")) for i := 0; i < count-1; i++ { - if _, pErr := client.SendWrapped(ctx, distSender, putArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, putArgs); pErr != nil { t.Fatal(pErr) } // Wait a little bit to increase the likelihood that we observe an invalid @@ -4210,7 +4210,7 @@ func TestStoreRangeWaitForApplication(t *testing.T) { } // Once the `count`th command has been issued, the request should return. - if _, pErr := client.SendWrapped(ctx, distSender, putArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, distSender, putArgs); pErr != nil { t.Fatal(pErr) } for i, errCh := range errChs { @@ -4592,7 +4592,7 @@ func TestAckWriteBeforeApplication(t *testing.T) { go func() { ctx := context.Background() put := putArgs(key, []byte("val")) - _, pErr := client.SendWrappedWith(ctx, mtc.stores[0].TestSender(), roachpb.Header{ + _, pErr := kv.SendWrappedWith(ctx, mtc.stores[0].TestSender(), roachpb.Header{ Timestamp: magicTS, }, put) ch <- pErr @@ -4737,13 +4737,13 @@ func TestProcessSplitAfterRightHandSideHasBeenRemoved(t *testing.T) { } ctx := context.Background() - increment := func(t *testing.T, db *client.DB, key roachpb.Key, by int64) { - b := &client.Batch{} + increment := func(t *testing.T, db *kv.DB, key roachpb.Key, by int64) { + b := &kv.Batch{} b.AddRawRequest(incrementArgs(key, by)) require.NoError(t, db.Run(ctx, b)) } changeReplicas := func( - t *testing.T, db *client.DB, typ roachpb.ReplicaChangeType, key roachpb.Key, idx int, + t *testing.T, db *kv.DB, typ roachpb.ReplicaChangeType, key roachpb.Key, idx int, ) error { ri, err := getRangeInfo(ctx, db, key) require.NoError(t, err) @@ -4751,8 +4751,8 @@ func TestProcessSplitAfterRightHandSideHasBeenRemoved(t *testing.T) { roachpb.MakeReplicationChanges(typ, makeReplicationTargets(idx+1)...)) return err } - split := func(t *testing.T, db *client.DB, key roachpb.Key) { - b := &client.Batch{} + split := func(t *testing.T, db *kv.DB, key roachpb.Key) { + b := &kv.Batch{} b.AddRawRequest(adminSplitArgs(key)) require.NoError(t, db.Run(ctx, b)) } @@ -4813,7 +4813,7 @@ func TestProcessSplitAfterRightHandSideHasBeenRemoved(t *testing.T) { // split to succeed and the RHS to eventually also be on all 3 nodes. setup := func(t *testing.T) ( mtc *multiTestContext, - db *client.DB, + db *kv.DB, keyA, keyB roachpb.Key, lhsID roachpb.RangeID, lhsPartition *mtcPartitionedRange, @@ -5191,7 +5191,7 @@ func TestReplicaRemovalClosesProposalQuota(t *testing.T) { go func(i int) { defer wg.Done() k := append(key[0:len(key):len(key)], strconv.Itoa(i)...) 
- _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ RangeID: desc.RangeID, }, putArgs(k, bytes.Repeat([]byte{'a'}, 1000))) require.Regexp(t, diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index c823334571b0..d141a5e16b9a 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -24,8 +24,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" @@ -87,7 +87,7 @@ func TestRangeCommandClockUpdate(t *testing.T) { manuals[0].Increment(int64(500 * time.Millisecond)) incArgs := incrementArgs([]byte("a"), 5) ts := clocks[0].Now() - if _, err := client.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{Timestamp: ts}, incArgs); err != nil { + if _, err := kv.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{Timestamp: ts}, incArgs); err != nil { t.Fatal(err) } @@ -141,7 +141,7 @@ func TestRejectFutureCommand(t *testing.T) { clockOffset := clock.MaxOffset() / numCmds for i := int64(1); i <= numCmds; i++ { ts := ts1.Add(i*clockOffset.Nanoseconds(), 0) - if _, err := client.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{Timestamp: ts}, incArgs); err != nil { + if _, err := kv.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{Timestamp: ts}, incArgs); err != nil { t.Fatal(err) } } @@ -152,7 +152,7 @@ func TestRejectFutureCommand(t *testing.T) { } // Once the accumulated offset reaches MaxOffset, commands will be rejected. - _, pErr := client.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{Timestamp: ts1.Add(clock.MaxOffset().Nanoseconds()+1, 0)}, incArgs) + _, pErr := kv.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{Timestamp: ts1.Add(clock.MaxOffset().Nanoseconds()+1, 0)}, incArgs) if !testutils.IsPError(pErr, "remote wall time is too far ahead") { t.Fatalf("unexpected error %v", pErr) } @@ -260,7 +260,7 @@ func TestTxnPutOutOfOrder(t *testing.T) { // Start a txn that does read-after-write. // The txn will be restarted twice, and the out-of-order put // will happen in the second epoch. 
- errChan <- store.DB().Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + errChan <- store.DB().Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { epoch++ if epoch == 1 { @@ -324,7 +324,7 @@ func TestTxnPutOutOfOrder(t *testing.T) { Timestamp: cfg.Clock.Now(), UserPriority: priority, } - if _, err := client.SendWrappedWith( + if _, err := kv.SendWrappedWith( context.Background(), store.TestSender(), h, &roachpb.GetRequest{RequestHeader: requestHeader}, ); err != nil { t.Fatalf("failed to get: %+v", err) @@ -334,7 +334,7 @@ func TestTxnPutOutOfOrder(t *testing.T) { RequestHeader: roachpb.RequestHeader{Key: roachpb.Key(restartKey)}, Value: roachpb.MakeValueFromBytes([]byte("restart-value")), } - if _, err := client.SendWrappedWith(context.Background(), store.TestSender(), h, putReq); err != nil { + if _, err := kv.SendWrappedWith(context.Background(), store.TestSender(), h, putReq); err != nil { t.Fatalf("failed to put: %+v", err) } @@ -349,12 +349,12 @@ func TestTxnPutOutOfOrder(t *testing.T) { manual.Increment(100) h.Timestamp = cfg.Clock.Now() - if _, err := client.SendWrappedWith( + if _, err := kv.SendWrappedWith( context.Background(), store.TestSender(), h, &roachpb.GetRequest{RequestHeader: requestHeader}, ); err == nil { t.Fatal("unexpected success of get") } - if _, err := client.SendWrappedWith(context.Background(), store.TestSender(), h, putReq); err != nil { + if _, err := kv.SendWrappedWith(context.Background(), store.TestSender(), h, putReq); err != nil { t.Fatalf("failed to put: %+v", err) } @@ -395,7 +395,7 @@ func TestRangeLookupUseReverse(t *testing.T) { } for _, split := range splits { - _, pErr := client.SendWrapped(context.Background(), store.TestSender(), split) + _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), split) if pErr != nil { t.Fatalf("%q: split unexpected error: %s", split.SplitKey, pErr) } @@ -409,7 +409,7 @@ func TestRangeLookupUseReverse(t *testing.T) { }, } testutils.SucceedsSoon(t, func() error { - _, pErr := client.SendWrapped(context.Background(), store.TestSender(), &scanArgs) + _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &scanArgs) return pErr.GoError() }) @@ -480,7 +480,7 @@ func TestRangeLookupUseReverse(t *testing.T) { for _, test := range testCases { t.Run(fmt.Sprintf("key=%s", test.key), func(t *testing.T) { - rs, preRs, err := client.RangeLookup(context.Background(), store.TestSender(), + rs, preRs, err := kv.RangeLookup(context.Background(), store.TestSender(), test.key.AsRawKey(), roachpb.READ_UNCOMMITTED, test.maxResults-1, true /* prefetchReverse */) if err != nil { t.Fatalf("LookupRange error: %+v", err) @@ -561,7 +561,7 @@ func setupLeaseTransferTest(t *testing.T) *leaseTransferTest { // First, do a write; we'll use it to determine when the dust has settled. 
l.leftKey = roachpb.Key("a") incArgs := incrementArgs(l.leftKey, 1) - if _, pErr := client.SendWrapped(context.Background(), l.mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), l.mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } @@ -596,7 +596,7 @@ func (l *leaseTransferTest) sendRead(storeIdx int) *roachpb.Error { if err != nil { return roachpb.NewError(err) } - _, pErr := client.SendWrappedWith( + _, pErr := kv.SendWrappedWith( context.Background(), l.mtc.senders[storeIdx], roachpb.Header{RangeID: desc.RangeID, Replica: replicaDesc}, @@ -931,7 +931,7 @@ func TestRangeLimitTxnMaxTimestamp(t *testing.T) { mtc.Start(t, 2) // Do a write on node1 to establish a key with its timestamp @t=100. - if _, pErr := client.SendWrapped( + if _, pErr := kv.SendWrapped( context.Background(), mtc.distSenders[0], putArgs(keyA, []byte("value")), ); pErr != nil { t.Fatal(pErr) @@ -967,7 +967,7 @@ func TestRangeLimitTxnMaxTimestamp(t *testing.T) { // we would end up incorrectly reading nothing for keyA. Instead we // expect to see an uncertainty interval error. h := roachpb.Header{Txn: &txn} - if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( context.Background(), mtc.distSenders[0], h, getArgs(keyA), ); !testutils.IsPError(pErr, "uncertainty") { t.Fatalf("expected an uncertainty interval error; got %v", pErr) @@ -1012,7 +1012,7 @@ func TestLeaseMetricsOnSplitAndTransfer(t *testing.T) { // Split the key space at key "a". splitKey := roachpb.RKey("a") splitArgs := adminSplitArgs(splitKey.AsRawKey()) - if _, pErr := client.SendWrapped( + if _, pErr := kv.SendWrapped( context.Background(), mtc.stores[0].TestSender(), splitArgs, ); pErr != nil { t.Fatal(pErr) @@ -1113,7 +1113,7 @@ func TestLeaseNotUsedAfterRestart(t *testing.T) { key := []byte("a") // Send a read, to acquire a lease. getArgs := getArgs(key) - if _, err := client.SendWrapped(ctx, mtc.stores[0].TestSender(), getArgs); err != nil { + if _, err := kv.SendWrapped(ctx, mtc.stores[0].TestSender(), getArgs); err != nil { t.Fatal(err) } @@ -1138,7 +1138,7 @@ func TestLeaseNotUsedAfterRestart(t *testing.T) { // Send another read and check that the pre-existing lease has not been used. // Concretely, we check that a new lease is requested. - if _, err := client.SendWrapped(ctx, mtc.stores[0].TestSender(), getArgs); err != nil { + if _, err := kv.SendWrapped(ctx, mtc.stores[0].TestSender(), getArgs); err != nil { t.Fatal(err) } // Check that the Send above triggered a lease acquisition. @@ -1201,7 +1201,7 @@ func TestLeaseExtensionNotBlockedByRead(t *testing.T) { Key: key, }, } - if _, pErr := client.SendWrappedWith(context.Background(), s.DB().NonTransactionalSender(), + if _, pErr := kv.SendWrappedWith(context.Background(), s.DB().NonTransactionalSender(), roachpb.Header{UserPriority: 42}, &getReq); pErr != nil { errChan <- pErr.GoError() @@ -1244,7 +1244,7 @@ func TestLeaseExtensionNotBlockedByRead(t *testing.T) { } leaseReq.PrevLease = curLease - _, pErr := client.SendWrapped(context.Background(), s.DB().NonTransactionalSender(), &leaseReq) + _, pErr := kv.SendWrapped(context.Background(), s.DB().NonTransactionalSender(), &leaseReq) if _, ok := pErr.GetDetail().(*roachpb.AmbiguousResultError); ok { log.Infof(context.Background(), "retrying lease after %s", pErr) continue @@ -1268,7 +1268,7 @@ func TestLeaseExtensionNotBlockedByRead(t *testing.T) { // LeaseInfo runs a LeaseInfoRequest using the specified server. 
func LeaseInfo( t *testing.T, - db *client.DB, + db *kv.DB, rangeDesc roachpb.RangeDescriptor, readConsistency roachpb.ReadConsistencyType, ) roachpb.LeaseInfoResponse { @@ -1277,7 +1277,7 @@ func LeaseInfo( Key: rangeDesc.StartKey.AsRawKey(), }, } - reply, pErr := client.SendWrappedWith(context.Background(), db.NonTransactionalSender(), roachpb.Header{ + reply, pErr := kv.SendWrappedWith(context.Background(), db.NonTransactionalSender(), roachpb.Header{ ReadConsistency: readConsistency, }, leaseInfoReq) if pErr != nil { @@ -1377,7 +1377,7 @@ func TestLeaseInfoRequest(t *testing.T) { Key: rangeDesc.StartKey.AsRawKey(), }, } - reply, pErr := client.SendWrappedWith( + reply, pErr := kv.SendWrappedWith( context.Background(), s, roachpb.Header{ RangeID: rangeDesc.RangeID, ReadConsistency: roachpb.INCONSISTENT, @@ -1431,7 +1431,7 @@ func TestErrorHandlingForNonKVCommand(t *testing.T) { Key: key, }, } - _, pErr := client.SendWrappedWith( + _, pErr := kv.SendWrappedWith( context.Background(), s.DB().NonTransactionalSender(), roachpb.Header{UserPriority: 42}, @@ -1462,7 +1462,7 @@ func TestRangeInfo(t *testing.T) { // Split the key space at key "a". splitKey := roachpb.RKey("a") splitArgs := adminSplitArgs(splitKey.AsRawKey()) - if _, pErr := client.SendWrapped( + if _, pErr := kv.SendWrapped( context.Background(), mtc.stores[0].TestSender(), splitArgs, ); pErr != nil { t.Fatal(pErr) @@ -1487,7 +1487,7 @@ func TestRangeInfo(t *testing.T) { // Verify range info is not set if unrequested. getArgs := getArgs(splitKey.AsRawKey()) - reply, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], getArgs) + reply, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], getArgs) if pErr != nil { t.Fatal(pErr) } @@ -1499,7 +1499,7 @@ func TestRangeInfo(t *testing.T) { h := roachpb.Header{ ReturnRangeInfo: true, } - reply, pErr = client.SendWrappedWith(context.Background(), mtc.distSenders[0], h, getArgs) + reply, pErr = kv.SendWrappedWith(context.Background(), mtc.distSenders[0], h, getArgs) if pErr != nil { t.Fatal(pErr) } @@ -1515,7 +1515,7 @@ func TestRangeInfo(t *testing.T) { // Verify range info on a put request. 
putArgs := putArgs(splitKey.AsRawKey(), []byte("foo")) - reply, pErr = client.SendWrappedWith(context.Background(), mtc.distSenders[0], h, putArgs) + reply, pErr = kv.SendWrappedWith(context.Background(), mtc.distSenders[0], h, putArgs) if pErr != nil { t.Fatal(pErr) } @@ -1530,7 +1530,7 @@ func TestRangeInfo(t *testing.T) { }, Target: rhsLease.Replica.StoreID, } - reply, pErr = client.SendWrappedWith(context.Background(), mtc.distSenders[0], h, adminArgs) + reply, pErr = kv.SendWrappedWith(context.Background(), mtc.distSenders[0], h, adminArgs) if pErr != nil { t.Fatal(pErr) } @@ -1547,7 +1547,7 @@ func TestRangeInfo(t *testing.T) { } txn := roachpb.MakeTransaction("test", roachpb.KeyMin, 1, mtc.clock.Now(), 0) h.Txn = &txn - reply, pErr = client.SendWrappedWith(context.Background(), mtc.distSenders[0], h, &scanArgs) + reply, pErr = kv.SendWrappedWith(context.Background(), mtc.distSenders[0], h, &scanArgs) if pErr != nil { t.Fatal(pErr) } @@ -1572,7 +1572,7 @@ func TestRangeInfo(t *testing.T) { EndKey: roachpb.KeyMax, }, } - reply, pErr = client.SendWrappedWith(context.Background(), mtc.distSenders[0], h, &revScanArgs) + reply, pErr = kv.SendWrappedWith(context.Background(), mtc.distSenders[0], h, &revScanArgs) if pErr != nil { t.Fatal(pErr) } @@ -1601,7 +1601,7 @@ func TestRangeInfo(t *testing.T) { t.Fatalf("unable to transfer lease to replica %s: %+v", r, err) } } - reply, pErr = client.SendWrappedWith(context.Background(), mtc.distSenders[0], h, &scanArgs) + reply, pErr = kv.SendWrappedWith(context.Background(), mtc.distSenders[0], h, &scanArgs) if pErr != nil { t.Fatal(pErr) } @@ -1810,7 +1810,7 @@ func TestClearRange(t *testing.T) { clearRange := func(start, end roachpb.Key) { t.Helper() - if _, err := client.SendWrapped(ctx, store.DB().NonTransactionalSender(), &roachpb.ClearRangeRequest{ + if _, err := kv.SendWrapped(ctx, store.DB().NonTransactionalSender(), &roachpb.ClearRangeRequest{ RequestHeader: roachpb.RequestHeader{ Key: start, EndKey: end, @@ -1900,17 +1900,17 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { // First, do a couple of writes; we'll use these to determine when // the dust has settled. incA := incrementArgs(keyA, 1) - if _, pErr := client.SendWrapped(ctx, mtc.stores[0].TestSender(), incA); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.stores[0].TestSender(), incA); pErr != nil { t.Fatal(pErr) } incC := incrementArgs(keyC, 2) - if _, pErr := client.SendWrapped(ctx, mtc.stores[0].TestSender(), incC); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.stores[0].TestSender(), incC); pErr != nil { t.Fatal(pErr) } // Split the system range from the rest of the keyspace. splitArgs := adminSplitArgs(keys.SystemMax) - if _, pErr := client.SendWrapped(ctx, mtc.stores[0].TestSender(), splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.stores[0].TestSender(), splitArgs); pErr != nil { t.Fatal(pErr) } @@ -1929,7 +1929,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { // and the write will be attempted on the new leaseholder (node 2). // It should not succeed because it should run into the timestamp cache. db := mtc.dbs[0] - txnOld := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + txnOld := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) // Perform a write with txnOld so that its timestamp gets set. 
if _, err := txnOld.Inc(ctx, keyB, 3); err != nil { @@ -1948,7 +1948,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { // we're regression testing against here still existed, we would not have // to do this. hb, hbH := heartbeatArgs(txnOld.TestingCloneTxn(), mtc.clock.Now()) - if _, pErr := client.SendWrappedWith(ctx, mtc.stores[0].TestSender(), hbH, hb); pErr != nil { + if _, pErr := kv.SendWrappedWith(ctx, mtc.stores[0].TestSender(), hbH, hb); pErr != nil { t.Fatal(pErr) } @@ -1967,7 +1967,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { RaftMessageHandler: store2, }) - if _, pErr := client.SendWrapped(ctx, mtc.stores[0].TestSender(), incC); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.stores[0].TestSender(), incC); pErr != nil { t.Fatal(pErr) } mtc.waitForValues(keyC, []int64{4, 4, 2}) @@ -1981,7 +1981,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { } truncArgs := truncateLogArgs(index+1, rangeID) truncArgs.Key = keyA - if _, err := client.SendWrapped(ctx, mtc.stores[0].TestSender(), truncArgs); err != nil { + if _, err := kv.SendWrapped(ctx, mtc.stores[0].TestSender(), truncArgs); err != nil { t.Fatal(err) } @@ -2735,7 +2735,7 @@ func TestChangeReplicasLeaveAtomicRacesWithMerge(t *testing.T) { ReplicationMode: base.ReplicationManual, }) // Make a magical context which will allow us to use atomic replica changes. - ctx := client.ChangeReplicasCanMixAddAndRemoveContext(context.Background()) + ctx := kv.ChangeReplicasCanMixAddAndRemoveContext(context.Background()) defer tc.Stopper().Stop(ctx) // We want to first get into a joint consensus scenario. @@ -2942,9 +2942,9 @@ func TestTransferLeaseBlocksWrites(t *testing.T) { // getRangeInfo retreives range info by performing a get against the provided // key and setting the ReturnRangeInfo flag to true. func getRangeInfo( - ctx context.Context, db *client.DB, key roachpb.Key, + ctx context.Context, db *kv.DB, key roachpb.Key, ) (ri *roachpb.RangeInfo, err error) { - err = db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Header.ReturnRangeInfo = true b.AddRawRequest(roachpb.NewGet(key)) diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index a77f060deb0e..bb95ce11d2e4 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -28,8 +28,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/abortspan" @@ -94,7 +94,7 @@ func TestStoreRangeSplitAtIllegalKeys(t *testing.T) { keys.MakeTablePrefix(10 /* system descriptor ID */), } { args := adminSplitArgs(key) - _, pErr := client.SendWrapped(context.Background(), store.TestSender(), args) + _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), args) if !testutils.IsPError(pErr, "cannot split") { t.Errorf("%q: unexpected split error %s", key, pErr) } @@ -131,7 +131,7 @@ func TestStoreSplitAbortSpan(t *testing.T) { // First write an intent on the key... 
incArgs := incrementArgs(key, 1) - _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: pushee}, incArgs) + _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: pushee}, incArgs) if pErr != nil { t.Fatalf("while sending +%v: %s", incArgs, pErr) } @@ -198,7 +198,7 @@ func TestStoreSplitAbortSpan(t *testing.T) { } for _, arg := range args { - _, pErr := client.SendWrapped(ctx, store.TestSender(), arg) + _, pErr := kv.SendWrapped(ctx, store.TestSender(), arg) if pErr != nil { t.Fatalf("while sending +%v: %s", arg, pErr) } @@ -247,7 +247,7 @@ func TestStoreRangeSplitAtTablePrefix(t *testing.T) { key := keys.UserTableDataMin args := adminSplitArgs(key) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { t.Fatalf("%q: split unexpected error: %s", key, pErr) } @@ -258,7 +258,7 @@ func TestStoreRangeSplitAtTablePrefix(t *testing.T) { } // Update SystemConfig to trigger gossip. - if err := store.DB().Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := store.DB().Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -326,7 +326,7 @@ func TestStoreRangeSplitInsideRow(t *testing.T) { // Split between col1Key and col2Key by splitting before col2Key. args := adminSplitArgs(col2Key) - _, pErr := client.SendWrapped(context.Background(), store.TestSender(), args) + _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), args) if pErr != nil { t.Fatalf("%s: split unexpected error: %s", col1Key, pErr) } @@ -363,18 +363,18 @@ func TestStoreRangeSplitIntents(t *testing.T) { // First, write some values left and right of the proposed split key. pArgs := putArgs([]byte("c"), []byte("foo")) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { t.Fatal(pErr) } pArgs = putArgs([]byte("x"), []byte("bar")) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { t.Fatal(pErr) } // Split the range. splitKey := roachpb.Key("m") args := adminSplitArgs(splitKey) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { t.Fatal(pErr) } @@ -438,14 +438,14 @@ func TestStoreRangeSplitAtRangeBounds(t *testing.T) { rngID := store.LookupReplica(roachpb.RKey(key)).RangeID h := roachpb.Header{RangeID: rngID} args := adminSplitArgs(key) - if _, pErr := client.SendWrappedWith(context.Background(), store, h, args); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store, h, args); pErr != nil { t.Fatal(pErr) } replCount := store.ReplicaCount() // An AdminSplit request sent to the end of the old range // should fail with a RangeKeyMismatchError. 
- _, pErr := client.SendWrappedWith(context.Background(), store, h, args) + _, pErr := kv.SendWrappedWith(context.Background(), store, h, args) if _, ok := pErr.GetDetail().(*roachpb.RangeKeyMismatchError); !ok { t.Fatalf("expected RangeKeyMismatchError, found: %v", pErr) } @@ -454,7 +454,7 @@ func TestStoreRangeSplitAtRangeBounds(t *testing.T) { // should succeed but no new ranges should be created. newRng := store.LookupReplica(roachpb.RKey(key)) h.RangeID = newRng.RangeID - if _, pErr := client.SendWrappedWith(context.Background(), store, h, args); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store, h, args); pErr != nil { t.Fatal(pErr) } @@ -554,11 +554,11 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { // First, write some values left and right of the proposed split key. pArgs := putArgs([]byte("c"), content) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { t.Fatal(pErr) } pArgs = putArgs([]byte("x"), content) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { t.Fatal(pErr) } @@ -570,7 +570,7 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { lTxn := txn lTxn.Sequence++ lIncArgs.Sequence = lTxn.Sequence - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ Txn: &lTxn, }, lIncArgs); pErr != nil { t.Fatal(pErr) @@ -579,7 +579,7 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { rTxn := txn rTxn.Sequence++ rIncArgs.Sequence = rTxn.Sequence - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ Txn: &rTxn, }, rIncArgs); pErr != nil { t.Fatal(pErr) @@ -594,7 +594,7 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { // Split the range. args := adminSplitArgs(splitKey) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { t.Fatal(pErr) } @@ -624,7 +624,7 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { // Try to get values from both left and right of where the split happened. gArgs := getArgs([]byte("c")) - if reply, pErr := client.SendWrapped(context.Background(), store.TestSender(), gArgs); pErr != nil { + if reply, pErr := kv.SendWrapped(context.Background(), store.TestSender(), gArgs); pErr != nil { t.Fatal(pErr) } else if replyBytes, pErr := reply.(*roachpb.GetResponse).Value.GetBytes(); pErr != nil { t.Fatal(pErr) @@ -632,7 +632,7 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { t.Fatalf("actual value %q did not match expected value %q", replyBytes, content) } gArgs = getArgs([]byte("x")) - if reply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ RangeID: newRng.RangeID, }, gArgs); pErr != nil { t.Fatal(pErr) @@ -644,7 +644,7 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { // Send out an increment request copied from above (same txn/sequence) // which remains in the old range. 
- _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ Txn: &lTxn, }, lIncArgs) if pErr != nil { @@ -653,7 +653,7 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { // Send out the same increment copied from above (same txn/sequence), but // now to the newly created range (which should hold that key). - _, pErr = client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + _, pErr = kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ RangeID: newRng.RangeID, Txn: &rTxn, }, rIncArgs) @@ -707,7 +707,7 @@ func TestStoreRangeSplitStats(t *testing.T) { // Split the range after the last table data key. keyPrefix := keys.MakeTablePrefix(keys.MinUserDescID) args := adminSplitArgs(keyPrefix) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), args); pErr != nil { t.Fatal(pErr) } // Verify empty range has empty stats. @@ -740,7 +740,7 @@ func TestStoreRangeSplitStats(t *testing.T) { // Split the range at approximate halfway point. args = adminSplitArgs(midKey) - if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ RangeID: repl.RangeID, }, args); pErr != nil { t.Fatal(pErr) @@ -840,7 +840,7 @@ func TestStoreEmptyRangeSnapshotSize(t *testing.T) { // no user data. splitKey := keys.MakeTablePrefix(keys.MinUserDescID) splitArgs := adminSplitArgs(splitKey) - if _, err := client.SendWrapped(ctx, mtc.distSenders[0], splitArgs); err != nil { + if _, err := kv.SendWrapped(ctx, mtc.distSenders[0], splitArgs); err != nil { t.Fatal(err) } @@ -907,7 +907,7 @@ func TestStoreRangeSplitStatsWithMerges(t *testing.T) { // Split the range after the last table data key. keyPrefix := keys.MakeTablePrefix(keys.MinUserDescID) args := adminSplitArgs(keyPrefix) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), args); pErr != nil { t.Fatal(pErr) } // Verify empty range has empty stats. @@ -925,7 +925,7 @@ func TestStoreRangeSplitStatsWithMerges(t *testing.T) { // Split the range at approximate halfway point. args = adminSplitArgs(midKey) - if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ RangeID: repl.RangeID, }, args); pErr != nil { t.Fatal(pErr) @@ -988,7 +988,7 @@ func fillRange( } val := randutil.RandBytes(src, int(src.Int31n(1<<8))) pArgs := putArgs(key, val) - _, pErr := client.SendWrappedWith(context.Background(), store, roachpb.Header{ + _, pErr := kv.SendWrappedWith(context.Background(), store, roachpb.Header{ RangeID: rangeID, }, pArgs) // When the split occurs in the background, our writes may start failing. @@ -1173,7 +1173,7 @@ func TestStoreRangeSplitBackpressureWrites(t *testing.T) { // Split at the split key. 
sArgs := adminSplitArgs(splitKey.AsRawKey()) repl := store.LookupReplica(splitKey) - if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{ RangeID: repl.RangeID, }, sArgs); pErr != nil { t.Fatal(pErr) @@ -1226,7 +1226,7 @@ func TestStoreRangeSplitBackpressureWrites(t *testing.T) { go func() { // Write to the first key of the range to make sure that // we don't end up on the wrong side of the split. - delRes <- store.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + delRes <- store.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Del(splitKey) return txn.CommitInBatch(ctx, b) @@ -1287,7 +1287,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { // - descriptor IDs are used to determine split keys // - the write triggers a SystemConfig update and gossip // We should end up with splits at each user table prefix. - if err := store.DB().Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := store.DB().Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -1361,7 +1361,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { // Write another, disjoint (+3) descriptor for a user table. userTableMax += 3 exceptions = map[int]struct{}{userTableMax - 1: {}, userTableMax - 2: {}} - if err := store.DB().Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := store.DB().Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -1419,17 +1419,17 @@ func runSetupSplitSnapshotRace( // First, do a couple of writes; we'll use these to determine when // the dust has settled. incArgs := incrementArgs(leftKey, 1) - if _, pErr := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); pErr != nil { t.Fatal(pErr) } incArgs = incrementArgs(rightKey, 2) - if _, pErr := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), incArgs); pErr != nil { t.Fatal(pErr) } // Split the system range from the rest of the keyspace. splitArgs := adminSplitArgs(keys.SystemMax) - if _, pErr := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), splitArgs); pErr != nil { t.Fatal(pErr) } @@ -1455,7 +1455,7 @@ func runSetupSplitSnapshotRace( // Split the data range. splitArgs = adminSplitArgs(roachpb.Key("m")) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { t.Fatal(pErr) } @@ -1488,7 +1488,7 @@ func runSetupSplitSnapshotRace( // failure and render the range unable to achieve quorum after // restart (in the SnapshotWins branch). 
incArgs = incrementArgs(rightKey, 3) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } @@ -1496,7 +1496,7 @@ func runSetupSplitSnapshotRace( mtc.waitForValues(rightKey, []int64{0, 0, 0, 2, 5, 5}) // Scan the meta ranges to resolve all intents - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], &roachpb.ScanRequest{ RequestHeader: roachpb.RequestHeader{ Key: keys.MetaMin, @@ -1530,7 +1530,7 @@ func TestSplitSnapshotRace_SplitWins(t *testing.T) { // Perform a write on the left range and wait for it to propagate. incArgs := incrementArgs(leftKey, 10) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } mtc.waitForValues(leftKey, []int64{0, 11, 11, 11, 0, 0}) @@ -1541,7 +1541,7 @@ func TestSplitSnapshotRace_SplitWins(t *testing.T) { // Write to the right range. incArgs = incrementArgs(rightKey, 20) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } mtc.waitForValues(rightKey, []int64{0, 0, 0, 25, 25, 25}) @@ -1562,7 +1562,7 @@ func TestSplitSnapshotRace_SnapshotWins(t *testing.T) { // Perform a write on the right range. incArgs := incrementArgs(rightKey, 20) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } @@ -1586,13 +1586,13 @@ func TestSplitSnapshotRace_SnapshotWins(t *testing.T) { // it helps wake up dormant ranges that would otherwise have to wait // for retry timeouts. incArgs = incrementArgs(leftKey, 10) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } mtc.waitForValues(leftKey, []int64{0, 11, 11, 11, 0, 0}) incArgs = incrementArgs(rightKey, 200) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } mtc.waitForValues(rightKey, []int64{0, 0, 0, 225, 225, 225}) @@ -1662,7 +1662,7 @@ func TestStoreSplitTimestampCacheDifferentLeaseHolder(t *testing.T) { defer cancel() // This transaction will try to write "under" a served read. - txnOld := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + txnOld := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) // Do something with txnOld so that its timestamp gets set. 
if _, err := txnOld.Scan(ctx, "a", "b", 0); err != nil { @@ -1886,11 +1886,11 @@ func TestStoreSplitGCThreshold(t *testing.T) { content := []byte("test") pArgs := putArgs(leftKey, content) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { t.Fatal(pErr) } pArgs = putArgs(rightKey, content) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), pArgs); pErr != nil { t.Fatal(pErr) } @@ -1904,12 +1904,12 @@ func TestStoreSplitGCThreshold(t *testing.T) { }, Threshold: specifiedGCThreshold, } - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), gcArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), gcArgs); pErr != nil { t.Fatal(pErr) } args := adminSplitArgs(splitKey) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { t.Fatal(pErr) } @@ -2003,7 +2003,7 @@ func TestStoreRangeSplitRaceUninitializedRHS(t *testing.T) { // range). splitKey := roachpb.Key(encoding.EncodeVarintDescending([]byte("a"), int64(i))) splitArgs := adminSplitArgs(splitKey) - _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs) + _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs) errChan <- pErr }() go func() { @@ -2076,17 +2076,17 @@ func TestLeaderAfterSplit(t *testing.T) { rightKey := roachpb.Key("z") splitArgs := adminSplitArgs(splitKey) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { t.Fatal(pErr) } incArgs := incrementArgs(leftKey, 1) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } incArgs = incrementArgs(rightKey, 2) - if _, pErr := client.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } } @@ -2099,7 +2099,7 @@ func BenchmarkStoreRangeSplit(b *testing.B) { // Perform initial split of ranges. sArgs := adminSplitArgs(roachpb.Key("b")) - if _, err := client.SendWrapped(context.Background(), store.TestSender(), sArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), store.TestSender(), sArgs); err != nil { b.Fatal(err) } @@ -2111,7 +2111,7 @@ func BenchmarkStoreRangeSplit(b *testing.B) { // Merge the b range back into the a range. mArgs := adminMergeArgs(roachpb.KeyMin) - if _, err := client.SendWrapped(context.Background(), store.TestSender(), mArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), store.TestSender(), mArgs); err != nil { b.Fatal(err) } @@ -2119,13 +2119,13 @@ func BenchmarkStoreRangeSplit(b *testing.B) { for i := 0; i < b.N; i++ { // Split the range. b.StartTimer() - if _, err := client.SendWrapped(context.Background(), store.TestSender(), sArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), store.TestSender(), sArgs); err != nil { b.Fatal(err) } // Merge the ranges. 
b.StopTimer() - if _, err := client.SendWrapped(context.Background(), store.TestSender(), mArgs); err != nil { + if _, err := kv.SendWrapped(context.Background(), store.TestSender(), mArgs); err != nil { b.Fatal(err) } } @@ -2167,7 +2167,7 @@ func writeRandomTimeSeriesDataToRange( }, Value: value, } - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ RangeID: rangeID, }, &mArgs); pErr != nil { t.Fatal(pErr) @@ -2278,7 +2278,7 @@ func TestStoreTxnWaitQueueEnabledOnSplit(t *testing.T) { key := keys.UserTableDataMin args := adminSplitArgs(key) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { t.Fatalf("%q: split unexpected error: %s", key, pErr) } @@ -2302,7 +2302,7 @@ func TestDistributedTxnCleanup(t *testing.T) { // Split at "a". lhsKey := roachpb.Key("a") args := adminSplitArgs(lhsKey) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { t.Fatalf("split at %q: %s", lhsKey, pErr) } lhs := store.LookupReplica(roachpb.RKey("a")) @@ -2310,7 +2310,7 @@ func TestDistributedTxnCleanup(t *testing.T) { // Split at "b". rhsKey := roachpb.Key("b") args = adminSplitArgs(rhsKey) - if _, pErr := client.SendWrappedWith(context.Background(), store, roachpb.Header{ + if _, pErr := kv.SendWrappedWith(context.Background(), store, roachpb.Header{ RangeID: lhs.RangeID, }, args); pErr != nil { t.Fatalf("split at %q: %s", rhsKey, pErr) @@ -2327,8 +2327,8 @@ func TestDistributedTxnCleanup(t *testing.T) { // Run a distributed transaction involving the lhsKey and rhsKey. var txnKey roachpb.Key ctx := context.Background() - txn := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) - txnFn := func(ctx context.Context, txn *client.Txn) error { + txn := kv.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */) + txnFn := func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Put(fmt.Sprintf("%s.force=%t,commit=%t", string(lhsKey), force, commit), "lhsValue") b.Put(fmt.Sprintf("%s.force=%t,commit=%t", string(rhsKey), force, commit), "rhsValue") @@ -2505,7 +2505,7 @@ func TestTxnWaitQueueDependencyCycleWithRangeSplit(t *testing.T) { // Split at "a". args := adminSplitArgs(lhsKey) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { t.Fatalf("split at %q: %s", lhsKey, pErr) } lhs := store.LookupReplica(roachpb.RKey("a")) @@ -2520,7 +2520,7 @@ func TestTxnWaitQueueDependencyCycleWithRangeSplit(t *testing.T) { // Start txn to write key a. txnACh := make(chan error) go func() { - txnACh <- store.DB().Txn(context.Background(), func(ctx context.Context, txn *client.Txn) error { + txnACh <- store.DB().Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, lhsKey, "value"); err != nil { return err } @@ -2537,7 +2537,7 @@ func TestTxnWaitQueueDependencyCycleWithRangeSplit(t *testing.T) { // Start txn to write key b. 
txnBCh := make(chan error) go func() { - txnBCh <- store.DB().Txn(context.Background(), func(ctx context.Context, txn *client.Txn) error { + txnBCh <- store.DB().Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, rhsKey, "value"); err != nil { return err } @@ -2569,7 +2569,7 @@ func TestTxnWaitQueueDependencyCycleWithRangeSplit(t *testing.T) { // Split at "b". args = adminSplitArgs(rhsKey) - if _, pErr := client.SendWrappedWith(context.Background(), store, roachpb.Header{ + if _, pErr := kv.SendWrappedWith(context.Background(), store, roachpb.Header{ RangeID: lhs.RangeID, }, args); pErr != nil { t.Fatalf("split at %q: %s", rhsKey, pErr) @@ -2627,7 +2627,7 @@ func TestStoreCapacityAfterSplit(t *testing.T) { manualClock.Increment(int64(kvserver.MinStatsDuration)) key := roachpb.Key("a") pArgs := putArgs(key, []byte("aaa")) - if _, pErr := client.SendWrapped(context.Background(), s.TestSender(), pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), s.TestSender(), pArgs); pErr != nil { t.Fatal(pErr) } @@ -2665,7 +2665,7 @@ func TestStoreCapacityAfterSplit(t *testing.T) { // Split the range to verify stats work properly with more than one range. sArgs := adminSplitArgs(key) - if _, pErr := client.SendWrapped(context.Background(), s.TestSender(), sArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), s.TestSender(), sArgs); pErr != nil { t.Fatal(pErr) } @@ -2724,13 +2724,13 @@ func TestRangeLookupAfterMeta2Split(t *testing.T) { // will require a scan that continues into the next meta2 range. const tableID = keys.MinUserDescID + 1 // 51 splitReq := adminSplitArgs(keys.MakeTablePrefix(tableID - 3 /* 48 */)) - if _, pErr := client.SendWrapped(ctx, s.DB().NonTransactionalSender(), splitReq); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, s.DB().NonTransactionalSender(), splitReq); pErr != nil { t.Fatal(pErr) } metaKey := keys.RangeMetaKey(keys.MakeTablePrefix(tableID)).AsRawKey() splitReq = adminSplitArgs(metaKey) - if _, pErr := client.SendWrapped(ctx, s.DB().NonTransactionalSender(), splitReq); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, s.DB().NonTransactionalSender(), splitReq); pErr != nil { t.Fatal(pErr) } @@ -2756,7 +2756,7 @@ func TestRangeLookupAfterMeta2Split(t *testing.T) { } else { lookupReq = &roachpb.ScanRequest{RequestHeader: header} } - if _, err := client.SendWrapped(ctx, s.DB().NonTransactionalSender(), lookupReq); err != nil { + if _, err := kv.SendWrapped(ctx, s.DB().NonTransactionalSender(), lookupReq); err != nil { t.Fatalf("%T %v", err.GoError(), err) } }) @@ -2817,7 +2817,7 @@ func TestStoreSplitRangeLookupRace(t *testing.T) { respFilter := func(ba roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { select { case <-blockRangeLookups: - if client.TestingIsRangeLookup(ba) && + if kv.TestingIsRangeLookup(ba) && ba.Requests[0].GetInner().(*roachpb.ScanRequest).Key.Equal(bounds.Key.AsRawKey()) { select { @@ -2856,7 +2856,7 @@ func TestStoreSplitRangeLookupRace(t *testing.T) { // Don't use s.DistSender() so that we don't disturb the RangeDescriptorCache. rangeID := store.LookupReplica(roachpb.RKey(splitKey)).RangeID - _, pErr := client.SendWrappedWith(context.Background(), store, roachpb.Header{ + _, pErr := kv.SendWrappedWith(context.Background(), store, roachpb.Header{ RangeID: rangeID, }, args) if pErr != nil { @@ -2941,12 +2941,12 @@ func TestRangeLookupAsyncResolveIntent(t *testing.T) { // specially by the range descriptor cache. 
key := roachpb.Key("a") args := adminSplitArgs(key) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), args); pErr != nil { t.Fatal(pErr) } // Get original meta2 descriptor. - rs, _, err := client.RangeLookup(ctx, store.TestSender(), key, roachpb.READ_UNCOMMITTED, 0, false) + rs, _, err := kv.RangeLookup(ctx, store.TestSender(), key, roachpb.READ_UNCOMMITTED, 0, false) if err != nil { t.Fatal(err) } @@ -2974,13 +2974,13 @@ func TestRangeLookupAsyncResolveIntent(t *testing.T) { pArgs := putArgs(keys.RangeMetaKey(roachpb.RKey(key2)).AsRawKey(), data) txn.Sequence++ pArgs.Sequence = txn.Sequence - if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: &txn}, pArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: &txn}, pArgs); pErr != nil { t.Fatal(pErr) } // Clear the range descriptor cache so that any future requests will first // need to perform a RangeLookup. - store.DB().NonTransactionalSender().(*client.CrossRangeTxnWrapperSender).Wrapped().(*kvcoord.DistSender).RangeDescriptorCache().Clear() + store.DB().NonTransactionalSender().(*kv.CrossRangeTxnWrapperSender).Wrapped().(*kvcoord.DistSender).RangeDescriptorCache().Clear() // Now send a request, forcing the RangeLookup. Since the lookup is // inconsistent, there's no WriteIntentError, but we'll try to resolve any @@ -3004,7 +3004,7 @@ func TestStoreSplitDisappearingReplicas(t *testing.T) { for i := 0; i < 100; i++ { key := roachpb.Key(fmt.Sprintf("a%d", i)) args := adminSplitArgs(key) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), args); pErr != nil { t.Fatalf("%q: split unexpected error: %s", key, pErr) } } @@ -3231,7 +3231,7 @@ func TestSplitBlocksReadsToRHS(t *testing.T) { g := ctxgroup.WithContext(ctx) g.GoCtx(func(ctx context.Context) error { args := adminSplitArgs(keySplit) - _, pErr := client.SendWrapped(ctx, store.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store.TestSender(), args) return pErr.GoError() }) @@ -3255,7 +3255,7 @@ func TestSplitBlocksReadsToRHS(t *testing.T) { g.GoCtx(func(ctx context.Context) error { // Send directly to repl to avoid racing with the // split and routing requests to the post-split RHS. 
-		_, pErr := client.SendWrappedWith(ctx, repl, h, args)
+		_, pErr := kv.SendWrappedWith(ctx, repl, h, args)
 		errCh <- pErr.GoError()
 		return nil
 	})
diff --git a/pkg/kv/kvserver/client_status_test.go b/pkg/kv/kvserver/client_status_test.go
index 0bd408c6896d..5215e6064948 100644
--- a/pkg/kv/kvserver/client_status_test.go
+++ b/pkg/kv/kvserver/client_status_test.go
@@ -14,7 +14,7 @@ import (
 	"context"
 	"testing"
 
-	"github.com/cockroachdb/cockroach/pkg/internal/client"
+	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
 	"github.com/cockroachdb/cockroach/pkg/testutils"
@@ -41,7 +41,7 @@ func TestComputeStatsForKeySpan(t *testing.T) {
 		header := roachpb.Header{
 			RangeID: repl.RangeID,
 		}
-		if _, err := client.SendWrappedWith(context.Background(), mtc.stores[0], header, args); err != nil {
+		if _, err := kv.SendWrappedWith(context.Background(), mtc.stores[0], header, args); err != nil {
 			t.Fatal(err)
 		}
 	}
diff --git a/pkg/kv/kvserver/client_test.go b/pkg/kv/kvserver/client_test.go
index 9f1b2159735b..7f691c79e7bd 100644
--- a/pkg/kv/kvserver/client_test.go
+++ b/pkg/kv/kvserver/client_test.go
@@ -37,8 +37,8 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
 	"github.com/cockroachdb/cockroach/pkg/gossip"
 	"github.com/cockroachdb/cockroach/pkg/gossip/resolver"
-	"github.com/cockroachdb/cockroach/pkg/internal/client"
 	"github.com/cockroachdb/cockroach/pkg/keys"
+	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer"
@@ -171,7 +171,7 @@ func createTestStoreWithOpts(
 		},
 		distSender,
 	)
-	storeCfg.DB = client.NewDB(ac, tcsFactory, storeCfg.Clock)
+	storeCfg.DB = kv.NewDB(ac, tcsFactory, storeCfg.Clock)
 	storeCfg.StorePool = kvserver.NewTestStorePool(storeCfg)
 	storeCfg.Transport = kvserver.NewDummyRaftTransport(storeCfg.Settings)
 	// TODO(bdarnell): arrange to have the transport closed.
@@ -280,7 +280,7 @@ type multiTestContext struct {
 	engines []storage.Engine
 	grpcServers []*grpc.Server
 	distSenders []*kvcoord.DistSender
-	dbs []*client.DB
+	dbs []*kv.DB
 	gossips []*gossip.Gossip
 	storePools []*kvserver.StorePool
 	// We use multiple stoppers so we can restart different parts of the
@@ -335,7 +335,7 @@ func (m *multiTestContext) Start(t testing.TB, numStores int) {
 	m.stores = make([]*kvserver.Store, numStores)
 	m.storePools = make([]*kvserver.StorePool, numStores)
 	m.distSenders = make([]*kvcoord.DistSender, numStores)
-	m.dbs = make([]*client.DB, numStores)
+	m.dbs = make([]*kv.DB, numStores)
 	m.stoppers = make([]*stop.Stopper, numStores)
 	m.senders = make([]*kvserver.Stores, numStores)
 	m.idents = make([]roachpb.StoreIdent, numStores)
@@ -767,7 +767,7 @@ func (m *multiTestContext) populateDB(idx int, st *cluster.Settings, stopper *st
 		},
 		m.distSenders[idx],
 	)
-	m.dbs[idx] = client.NewDB(ambient, tcsFactory, m.clocks[idx])
+	m.dbs[idx] = kv.NewDB(ambient, tcsFactory, m.clocks[idx])
 }
 
 func (m *multiTestContext) populateStorePool(
diff --git a/pkg/kv/kvserver/concurrency/concurrency_manager.go b/pkg/kv/kvserver/concurrency/concurrency_manager.go
index e355b28fcfcc..022b9565780b 100644
--- a/pkg/kv/kvserver/concurrency/concurrency_manager.go
+++ b/pkg/kv/kvserver/concurrency/concurrency_manager.go
@@ -14,7 +14,7 @@ import (
 	"context"
 	"sync"
 
-	"github.com/cockroachdb/cockroach/pkg/internal/client"
+	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanlatch"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanset"
@@ -49,7 +49,7 @@ type Config struct {
 	RangeDesc *roachpb.RangeDescriptor
 	// Components.
 	Settings *cluster.Settings
-	DB *client.DB
+	DB *kv.DB
 	Clock *hlc.Clock
 	Stopper *stop.Stopper
 	IntentResolver IntentResolver
diff --git a/pkg/kv/kvserver/consistency_queue_test.go b/pkg/kv/kvserver/consistency_queue_test.go
index aba35618f458..64a648b67fd6 100644
--- a/pkg/kv/kvserver/consistency_queue_test.go
+++ b/pkg/kv/kvserver/consistency_queue_test.go
@@ -22,7 +22,7 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/base"
 	"github.com/cockroachdb/cockroach/pkg/config"
-	"github.com/cockroachdb/cockroach/pkg/internal/client"
+	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase"
@@ -84,7 +84,7 @@ func TestCheckConsistencyMultiStore(t *testing.T) {
 	// Write something to the DB.
 	putArgs := putArgs([]byte("a"), []byte("b"))
-	if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), putArgs); err != nil {
+	if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), putArgs); err != nil {
 		t.Fatal(err)
 	}
@@ -96,7 +96,7 @@ func TestCheckConsistencyMultiStore(t *testing.T) {
 			EndKey: []byte("aa"),
 		},
 	}
-	if _, err := client.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{
+	if _, err := kv.SendWrappedWith(context.Background(), mtc.stores[0].TestSender(), roachpb.Header{
 		Timestamp: mtc.stores[0].Clock().Now(),
 	}, &checkArgs); err != nil {
 		t.Fatal(err)
@@ -158,7 +158,7 @@ func TestCheckConsistencyReplay(t *testing.T) {
 			EndKey: []byte("b"),
 		},
 	}
-	if _, err := client.SendWrapped(ctx, mtc.Store(0).TestSender(), &checkArgs); err != nil {
+	if _, err := kv.SendWrapped(ctx, mtc.Store(0).TestSender(), &checkArgs); err != nil {
 		t.Fatal(err)
 	}
@@ -261,11 +261,11 @@ func TestCheckConsistencyInconsistent(t *testing.T) {
 	// Write something to the DB.
 	pArgs := putArgs([]byte("a"), []byte("b"))
-	if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil {
+	if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil {
 		t.Fatal(err)
 	}
 	pArgs = putArgs([]byte("c"), []byte("d"))
-	if _, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil {
+	if _, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), pArgs); err != nil {
 		t.Fatal(err)
 	}
@@ -278,7 +278,7 @@ func TestCheckConsistencyInconsistent(t *testing.T) {
 		},
 		Mode: roachpb.ChecksumMode_CHECK_VIA_QUEUE,
 	}
-	resp, err := client.SendWrapped(context.Background(), mtc.stores[0].TestSender(), &checkArgs)
+	resp, err := kv.SendWrapped(context.Background(), mtc.stores[0].TestSender(), &checkArgs)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -423,8 +423,8 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 	key := []byte("a")
 
-	computeDelta := func(db *client.DB) enginepb.MVCCStats {
-		var b client.Batch
+	computeDelta := func(db *kv.DB) enginepb.MVCCStats {
+		var b kv.Batch
 		b.AddRawRequest(&roachpb.RecomputeStatsRequest{
 			RequestHeader: roachpb.RequestHeader{Key: key},
 			DryRun: true,
diff --git a/pkg/kv/kvserver/helpers_test.go b/pkg/kv/kvserver/helpers_test.go
index 8e023f7b1957..0c2d9d876dda 100644
--- a/pkg/kv/kvserver/helpers_test.go
+++ b/pkg/kv/kvserver/helpers_test.go
@@ -26,7 +26,7 @@ import (
 	circuit "github.com/cockroachdb/circuitbreaker"
 	"github.com/cockroachdb/cockroach/pkg/config"
 	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
-	"github.com/cockroachdb/cockroach/pkg/internal/client"
+	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer"
@@ -101,7 +101,7 @@ func (s *Store) ConsistencyQueueShouldQueue(
 // range which contains the given key.
 func (s *Store) LogReplicaChangeTest(
 	ctx context.Context,
-	txn *client.Txn,
+	txn *kv.Txn,
 	changeType roachpb.ReplicaChangeType,
 	replica roachpb.ReplicaDescriptor,
 	desc roachpb.RangeDescriptor,
@@ -499,7 +499,7 @@ func WriteRandomDataToRange(
 		key = append(key, randutil.RandBytes(src, int(src.Int31n(1<<7)))...)
 		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
 		pArgs := putArgs(key, val)
-		if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
+		if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
 			RangeID: rangeID,
 		}, &pArgs); pErr != nil {
 			t.Fatal(pErr)
diff --git a/pkg/kv/kvserver/idalloc/id_alloc.go b/pkg/kv/kvserver/idalloc/id_alloc.go
index 87676117aed2..feebe3357e0f 100644
--- a/pkg/kv/kvserver/idalloc/id_alloc.go
+++ b/pkg/kv/kvserver/idalloc/id_alloc.go
@@ -17,7 +17,7 @@ import (
 	"sync/atomic"
 
 	"github.com/cockroachdb/cockroach/pkg/base"
-	"github.com/cockroachdb/cockroach/pkg/internal/client"
+	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
 	"github.com/cockroachdb/cockroach/pkg/util/log"
 	"github.com/cockroachdb/cockroach/pkg/util/retry"
@@ -34,7 +34,7 @@ type Allocator struct {
 	log.AmbientContext
 
 	idKey atomic.Value
-	db *client.DB
+	db *kv.DB
 	blockSize uint32 // Block allocation size
 	ids chan uint32 // Channel of available IDs
 	stopper *stop.Stopper
@@ -47,11 +47,7 @@ type Allocator struct {
 // internally by the allocator that can't be generated). The first value
 // returned is the existing value + 1, or 1 if the key did not previously exist.
 func NewAllocator(
-	ambient log.AmbientContext,
-	idKey roachpb.Key,
-	db *client.DB,
-	blockSize uint32,
-	stopper *stop.Stopper,
+	ambient log.AmbientContext, idKey roachpb.Key, db *kv.DB, blockSize uint32, stopper *stop.Stopper,
 ) (*Allocator, error) {
 	if blockSize == 0 {
 		return nil, errors.Errorf("blockSize must be a positive integer: %d", blockSize)
@@ -92,7 +88,7 @@ func (ia *Allocator) start() {
 		for {
 			var newValue int64
 			var err error
-			var res client.KeyValue
+			var res kv.KeyValue
 			for r := retry.Start(base.DefaultRetryOptions()); r.Next(); {
 				idKey := ia.idKey.Load().(roachpb.Key)
 				if err := ia.stopper.RunTask(ctx, "storage.Allocator: allocating block", func(ctx context.Context) {
diff --git a/pkg/kv/kvserver/intent_resolver_integration_test.go b/pkg/kv/kvserver/intent_resolver_integration_test.go
index b515c3b0074c..a00640afd669 100644
--- a/pkg/kv/kvserver/intent_resolver_integration_test.go
+++ b/pkg/kv/kvserver/intent_resolver_integration_test.go
@@ -15,7 +15,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/cockroachdb/cockroach/pkg/internal/client"
+	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/roachpb"
 	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
 	"github.com/cockroachdb/cockroach/pkg/util/stop"
@@ -77,7 +77,7 @@ func TestContendedIntentWithDependencyCycle(t *testing.T) {
 	go func() {
 		put := putArgs(keyA, []byte("value"))
 		assignSeqNumsForReqs(txn1, &put)
-		if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: txn1}, &put); pErr != nil {
+		if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: txn1}, &put); pErr != nil {
 			txnCh1 <- pErr.GoError()
 			return
 		}
@@ -85,7 +85,7 @@ func TestContendedIntentWithDependencyCycle(t *testing.T) {
 		et.LockSpans = []roachpb.Span{spanA, spanB}
 		et.CanCommitAtHigherTimestamp = true
 		assignSeqNumsForReqs(txn1, &et)
-		_, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: txn1}, &et)
+		_, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: txn1}, &et)
 		txnCh1 <- pErr.GoError()
 	}()
@@ -95,7 +95,7 @@ func TestContendedIntentWithDependencyCycle(t *testing.T) {
 	readCh1 := make(chan error, 1)
 	go func() {
 		get := getArgs(keyB)
-		_, pErr := client.SendWrapped(ctx, store.TestSender(), &get)
+		_, pErr := kv.SendWrapped(ctx, store.TestSender(), &get)
 		readCh1 <- pErr.GoError()
 	}()
@@ -104,7 +104,7 @@ func TestContendedIntentWithDependencyCycle(t *testing.T) {
 	go func() {
 		put := putArgs(keyB, []byte("value"))
 		assignSeqNumsForReqs(txn2, &put)
-		repl, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: txn2}, &put)
+		repl, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: txn2}, &put)
 		if pErr != nil {
 			txnCh2 <- pErr.GoError()
 			return
@@ -115,7 +115,7 @@ func TestContendedIntentWithDependencyCycle(t *testing.T) {
 		et.LockSpans = []roachpb.Span{spanB}
 		et.CanCommitAtHigherTimestamp = true
 		assignSeqNumsForReqs(txn2, &et)
-		_, pErr = client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: txn2}, &et)
+		_, pErr = kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: txn2}, &et)
 		txnCh2 <- pErr.GoError()
 	}()
@@ -125,7 +125,7 @@ func TestContendedIntentWithDependencyCycle(t *testing.T) {
 	readCh2 := make(chan error, 1)
 	go func() {
 		get := getArgs(keyB)
-		_, pErr := client.SendWrapped(ctx, store.TestSender(), &get)
+		_, pErr := kv.SendWrapped(ctx, store.TestSender(), &get)
 		readCh2 <- pErr.GoError()
 	}()
@@ -137,7 +137,7 @@ func TestContendedIntentWithDependencyCycle(t *testing.T) {
 	go func() {
 		put := putArgs(keyB, []byte("value"))
 		assignSeqNumsForReqs(txn3, &put)
-		_, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: txn3}, &put)
+		_, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: txn3}, &put)
 		txnCh3 <- pErr.GoError()
 	}()
diff --git a/pkg/kv/kvserver/intentresolver/intent_resolver.go b/pkg/kv/kvserver/intentresolver/intent_resolver.go
index e0eec18f7fae..4038bcf001f4 100644
--- a/pkg/kv/kvserver/intentresolver/intent_resolver.go
+++ b/pkg/kv/kvserver/intentresolver/intent_resolver.go
@@ -16,9 +16,9 @@ import (
 	"sort"
 	"time"
 
-	"github.com/cockroachdb/cockroach/pkg/internal/client"
 	"github.com/cockroachdb/cockroach/pkg/internal/client/requestbatcher"
 	"github.com/cockroachdb/cockroach/pkg/keys"
+	"github.com/cockroachdb/cockroach/pkg/kv"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvbase"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result"
 	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
@@ -94,7 +94,7 @@ const (
 // Config contains the dependencies to construct an IntentResolver.
 type Config struct {
 	Clock *hlc.Clock
-	DB *client.DB
+	DB *kv.DB
 	Stopper *stop.Stopper
 	AmbientCtx log.AmbientContext
 	TestingKnobs storagebase.IntentResolverTestingKnobs
@@ -113,7 +113,7 @@ type IntentResolver struct {
 	Metrics Metrics
 
 	clock *hlc.Clock
-	db *client.DB
+	db *kv.DB
 	stopper *stop.Stopper
 	testingKnobs storagebase.IntentResolverTestingKnobs
 	ambientCtx log.AmbientContext
@@ -341,7 +341,7 @@ func (ir *IntentResolver) MaybePushTransactions(
 	log.Eventf(ctx, "pushing %d transaction(s)", len(pushTxns))
 	// Attempt to push the transaction(s).
- b := &client.Batch{} + b := &kv.Batch{} b.Header.Timestamp = ir.clock.Now() for _, pushTxn := range pushTxns { b.AddRawRequest(&roachpb.PushTxnRequest{ @@ -603,7 +603,7 @@ func (ir *IntentResolver) CleanupTxnIntentsOnGCAsync( log.VErrEventf(ctx, 3, "cannot push a %s transaction which is not expired: %s", txn.Status, txn) return } - b := &client.Batch{} + b := &kv.Batch{} b.Header.Timestamp = now b.AddRawRequest(&roachpb.PushTxnRequest{ RequestHeader: roachpb.RequestHeader{Key: txn.Key}, @@ -841,7 +841,7 @@ func (ir *IntentResolver) ResolveIntents( // requests to a maximum number of keys and resume as necessary. for _, req := range resolveRangeReqs { for { - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = intentResolverBatchSize b.AddRawRequest(req) if err := ir.db.Run(ctx, b); err != nil { diff --git a/pkg/kv/kvserver/intentresolver/intent_resolver_test.go b/pkg/kv/kvserver/intentresolver/intent_resolver_test.go index 8236bd6aa698..ab6023e08818 100644 --- a/pkg/kv/kvserver/intentresolver/intent_resolver_test.go +++ b/pkg/kv/kvserver/intentresolver/intent_resolver_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -787,14 +787,14 @@ func makeTxnIntents(t *testing.T, clock *hlc.Clock, numIntents int) []roachpb.In type sendFunc func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) func newIntentResolverWithSendFuncs(c Config, sf *sendFuncs) *IntentResolver { - txnSenderFactory := client.NonTransactionalFactoryFunc( + txnSenderFactory := kv.NonTransactionalFactoryFunc( func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { sf.mu.Lock() defer sf.mu.Unlock() f := sf.popLocked() return f(ba) }) - db := client.NewDB(log.AmbientContext{ + db := kv.NewDB(log.AmbientContext{ Tracer: tracing.NewTracer(), }, txnSenderFactory, c.Clock) c.DB = db diff --git a/pkg/kv/kvserver/log.go b/pkg/kv/kvserver/log.go index 873018c842c9..aebb711daa64 100644 --- a/pkg/kv/kvserver/log.go +++ b/pkg/kv/kvserver/log.go @@ -15,7 +15,7 @@ import ( "encoding/json" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagepb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -26,7 +26,7 @@ import ( ) func (s *Store) insertRangeLogEvent( - ctx context.Context, txn *client.Txn, event storagepb.RangeLogEvent, + ctx context.Context, txn *kv.Txn, event storagepb.RangeLogEvent, ) error { // Record range log event to console log. var info string @@ -98,7 +98,7 @@ func (s *Store) insertRangeLogEvent( // TODO(mrtracy): There are several different reasons that a range split // could occur, and that information should be logged. func (s *Store) logSplit( - ctx context.Context, txn *client.Txn, updatedDesc, newDesc roachpb.RangeDescriptor, + ctx context.Context, txn *kv.Txn, updatedDesc, newDesc roachpb.RangeDescriptor, ) error { if !s.cfg.LogRangeEvents { return nil @@ -122,7 +122,7 @@ func (s *Store) logSplit( // TODO(benesch): There are several different reasons that a range merge // could occur, and that information should be logged. 
func (s *Store) logMerge( - ctx context.Context, txn *client.Txn, updatedLHSDesc, rhsDesc roachpb.RangeDescriptor, + ctx context.Context, txn *kv.Txn, updatedLHSDesc, rhsDesc roachpb.RangeDescriptor, ) error { if !s.cfg.LogRangeEvents { return nil @@ -146,7 +146,7 @@ func (s *Store) logMerge( // could occur, and that information should be logged. func (s *Store) logChange( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, changeType roachpb.ReplicaChangeType, replica roachpb.ReplicaDescriptor, desc roachpb.RangeDescriptor, diff --git a/pkg/kv/kvserver/log_test.go b/pkg/kv/kvserver/log_test.go index 14a338b91679..8dfbd027f21c 100644 --- a/pkg/kv/kvserver/log_test.go +++ b/pkg/kv/kvserver/log_test.go @@ -18,8 +18,8 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagepb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -258,7 +258,7 @@ func TestLogRebalances(t *testing.T) { // Log several fake events using the store. const details = "test" logEvent := func(changeType roachpb.ReplicaChangeType, reason storagepb.RangeLogEventReason) { - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return store.LogReplicaChangeTest(ctx, txn, changeType, desc.InternalReplicas[0], *desc, reason, details) }); err != nil { t.Fatal(err) diff --git a/pkg/kv/kvserver/merge_queue.go b/pkg/kv/kvserver/merge_queue.go index 2cd86a952a34..270f7a1ed9f1 100644 --- a/pkg/kv/kvserver/merge_queue.go +++ b/pkg/kv/kvserver/merge_queue.go @@ -18,8 +18,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" @@ -85,11 +85,11 @@ var MergeQueueInterval = settings.RegisterNonNegativeDurationSetting( // initiated. 
type mergeQueue struct { *baseQueue - db *client.DB + db *kv.DB purgChan <-chan time.Time } -func newMergeQueue(store *Store, db *client.DB, gossip *gossip.Gossip) *mergeQueue { +func newMergeQueue(store *Store, db *kv.DB, gossip *gossip.Gossip) *mergeQueue { mq := &mergeQueue{ db: db, purgChan: time.NewTicker(mergeQueuePurgatoryCheckInterval).C, @@ -197,7 +197,7 @@ var _ purgatoryError = rangeMergePurgatoryError{} func (mq *mergeQueue) requestRangeStats( ctx context.Context, key roachpb.Key, ) (*roachpb.RangeDescriptor, enginepb.MVCCStats, float64, error) { - res, pErr := client.SendWrappedWith(ctx, mq.db.NonTransactionalSender(), roachpb.Header{ + res, pErr := kv.SendWrappedWith(ctx, mq.db.NonTransactionalSender(), roachpb.Header{ ReturnRangeInfo: true, }, &roachpb.RangeStatsRequest{ RequestHeader: roachpb.RequestHeader{Key: key}, diff --git a/pkg/kv/kvserver/node_liveness.go b/pkg/kv/kvserver/node_liveness.go index 5529039cfdd5..b1f3e581d6ae 100644 --- a/pkg/kv/kvserver/node_liveness.go +++ b/pkg/kv/kvserver/node_liveness.go @@ -18,8 +18,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/ctpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagepb" @@ -150,7 +150,7 @@ type HeartbeatCallback func(context.Context) type NodeLiveness struct { ambientCtx log.AmbientContext clock *hlc.Clock - db *client.DB + db *kv.DB engines []storage.Engine gossip *gossip.Gossip livenessThreshold time.Duration @@ -178,7 +178,7 @@ type NodeLiveness struct { func NewNodeLiveness( ambient log.AmbientContext, clock *hlc.Clock, - db *client.DB, + db *kv.DB, engines []storage.Engine, g *gossip.Gossip, livenessThreshold time.Duration, @@ -824,7 +824,7 @@ func (nl *NodeLiveness) updateLivenessAttempt( } } - if err := nl.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := nl.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() key := keys.NodeLivenessKey(update.NodeID) val := update.Liveness diff --git a/pkg/kv/kvserver/protectedts/protectedts.go b/pkg/kv/kvserver/protectedts/protectedts.go index 5a6c9bc333cd..134d2d1a3c32 100644 --- a/pkg/kv/kvserver/protectedts/protectedts.go +++ b/pkg/kv/kvserver/protectedts/protectedts.go @@ -16,7 +16,7 @@ import ( "context" "errors" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -64,7 +64,7 @@ type Storage interface { // // An error will be returned if the ID of the provided record already exists // so callers should be sure to generate new IDs when creating records. - Protect(context.Context, *client.Txn, *ptpb.Record) error + Protect(context.Context, *kv.Txn, *ptpb.Record) error // GetRecord retreives the record with the specified UUID as well as the MVCC // timestamp at which it was written. If no corresponding record exists @@ -75,26 +75,26 @@ type Storage interface { // should be protected as well as the timestamp at which the Record providing // that protection is known to be alive. The ReadTimestamp of the Txn used in // this method can be used to provide such a timestamp. 
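// Editor's note: illustrative sketch only, not part of this patch. The comment above
// describes using the reading transaction's timestamp as the "known alive at" time for
// a record; with the renamed package that pattern (also visible in ptverifier below)
// looks roughly like the hypothetical helper here. db, ptStorage, and recordID are
// assumptions for illustration:
func recordAliveAt(
	ctx context.Context, db *kv.DB, ptStorage protectedts.Storage, recordID uuid.UUID,
) (*ptpb.Record, hlc.Timestamp, error) {
	var rec *ptpb.Record
	var readAt hlc.Timestamp
	if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		var err error
		rec, err = ptStorage.GetRecord(ctx, txn, recordID)
		readAt = txn.ReadTimestamp()
		return err
	}); err != nil {
		return nil, hlc.Timestamp{}, err
	}
	return rec, readAt, nil
}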
- GetRecord(context.Context, *client.Txn, uuid.UUID) (*ptpb.Record, error) + GetRecord(context.Context, *kv.Txn, uuid.UUID) (*ptpb.Record, error) // MarkVerified will mark a protected timestamp as verified. // // This method is generally used by an implementation of Verifier. - MarkVerified(context.Context, *client.Txn, uuid.UUID) error + MarkVerified(context.Context, *kv.Txn, uuid.UUID) error // Release allows spans which were previously protected to now be garbage // collected. // // If the specified UUID does not exist ErrNotFound is returned but the // passed txn remains safe for future use. - Release(context.Context, *client.Txn, uuid.UUID) error + Release(context.Context, *kv.Txn, uuid.UUID) error // GetMetadata retreives the metadata with the provided Txn. - GetMetadata(context.Context, *client.Txn) (ptpb.Metadata, error) + GetMetadata(context.Context, *kv.Txn) (ptpb.Metadata, error) // GetState retreives the entire state of protectedts.Storage with the // provided Txn. - GetState(context.Context, *client.Txn) (ptpb.State, error) + GetState(context.Context, *kv.Txn) (ptpb.State, error) } // Iterator iterates records in a cache until wantMore is false or all Records diff --git a/pkg/kv/kvserver/protectedts/ptcache/cache.go b/pkg/kv/kvserver/protectedts/ptcache/cache.go index 7c88c854b516..0c333d4d2c8f 100644 --- a/pkg/kv/kvserver/protectedts/ptcache/cache.go +++ b/pkg/kv/kvserver/protectedts/ptcache/cache.go @@ -14,7 +14,7 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -31,7 +31,7 @@ import ( // Cache implements protectedts.Cache. type Cache struct { - db *client.DB + db *kv.DB storage protectedts.Storage stopper *stop.Stopper settings *cluster.Settings @@ -56,7 +56,7 @@ type Cache struct { // Config configures a Cache. type Config struct { - DB *client.DB + DB *kv.DB Storage protectedts.Storage Settings *cluster.Settings } @@ -211,7 +211,7 @@ func (c *Cache) doUpdate(ctx context.Context) error { state ptpb.State ts hlc.Timestamp ) - err := c.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + err := c.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { // NB: because this is a read-only transaction, the commit will be a no-op; // returning nil here means the transaction will commit and will never need // to change its read timestamp. diff --git a/pkg/kv/kvserver/protectedts/ptprovider/provider.go b/pkg/kv/kvserver/protectedts/ptprovider/provider.go index e4c22836862a..25423b5e37ef 100644 --- a/pkg/kv/kvserver/protectedts/ptprovider/provider.go +++ b/pkg/kv/kvserver/protectedts/ptprovider/provider.go @@ -15,7 +15,7 @@ package ptprovider import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptcache" @@ -31,7 +31,7 @@ import ( // Config configures the Provider. 
type Config struct { Settings *cluster.Settings - DB *client.DB + DB *kv.DB Stores *kvserver.Stores ReconcileStatusFuncs ptreconcile.StatusFuncs InternalExecutor sqlutil.InternalExecutor diff --git a/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go b/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go index 2879d4760822..28877c8a212d 100644 --- a/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go +++ b/pkg/kv/kvserver/protectedts/ptreconcile/reconciler.go @@ -17,8 +17,8 @@ import ( "math/rand" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" @@ -42,7 +42,7 @@ var ReconcileInterval = settings.RegisterPublicNonNegativeDurationSetting( // StatusFunc is used to check on the status of a Record based on its Meta // field. type StatusFunc func( - ctx context.Context, txn *client.Txn, meta []byte, + ctx context.Context, txn *kv.Txn, meta []byte, ) (shouldRemove bool, _ error) // StatusFuncs maps from MetaType to a StatusFunc. @@ -53,7 +53,7 @@ type Config struct { Settings *cluster.Settings // Stores is used to ensure that we only run the reconciliation loop on Stores *kvserver.Stores - DB *client.DB + DB *kv.DB Storage protectedts.Storage Cache protectedts.Cache @@ -68,7 +68,7 @@ type Config struct { type Reconciler struct { settings *cluster.Settings localStores *kvserver.Stores - db *client.DB + db *kv.DB cache protectedts.Cache pts protectedts.Storage metrics Metrics @@ -165,7 +165,7 @@ func (r *Reconciler) reconcile(ctx context.Context) { return true } var didRemove bool - if err := r.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { didRemove = false // reset for retries shouldRemove, err := task(ctx, txn, rec.Meta) if err != nil { diff --git a/pkg/kv/kvserver/protectedts/ptreconcile/reconciler_test.go b/pkg/kv/kvserver/protectedts/ptreconcile/reconciler_test.go index 0ca65b3411dd..2323357c88c5 100644 --- a/pkg/kv/kvserver/protectedts/ptreconcile/reconciler_test.go +++ b/pkg/kv/kvserver/protectedts/ptreconcile/reconciler_test.go @@ -16,8 +16,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" @@ -61,7 +61,7 @@ func TestReconciler(t *testing.T) { Cache: ptp, StatusFuncs: ptreconcile.StatusFuncs{ testTaskType: func( - ctx context.Context, txn *client.Txn, meta []byte, + ctx context.Context, txn *kv.Txn, meta []byte, ) (shouldRemove bool, err error) { state.mu.Lock() defer state.mu.Unlock() @@ -83,7 +83,7 @@ func TestReconciler(t *testing.T) { {Key: keys.MinKey, EndKey: keys.MaxKey}, }, } - require.NoError(t, s0.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.NoError(t, s0.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return ptp.Protect(ctx, txn, &rec1) })) @@ -111,7 +111,7 @@ func TestReconciler(t *testing.T) { } return nil }) - require.Regexp(t, protectedts.ErrNotExists, s0.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) 
error { + require.Regexp(t, protectedts.ErrNotExists, s0.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { _, err := ptp.GetRecord(ctx, txn, rec1.ID) return err })) diff --git a/pkg/kv/kvserver/protectedts/ptstorage/storage.go b/pkg/kv/kvserver/protectedts/ptstorage/storage.go index 5ef6844d4e62..46dc1cdf0674 100644 --- a/pkg/kv/kvserver/protectedts/ptstorage/storage.go +++ b/pkg/kv/kvserver/protectedts/ptstorage/storage.go @@ -14,7 +14,7 @@ package ptstorage import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -56,7 +56,7 @@ func New(settings *cluster.Settings, ex sqlutil.InternalExecutor) protectedts.St var errNoTxn = errors.New("must provide a non-nil transaction") -func (p *storage) Protect(ctx context.Context, txn *client.Txn, r *ptpb.Record) error { +func (p *storage) Protect(ctx context.Context, txn *kv.Txn, r *ptpb.Record) error { if err := validateRecordForProtect(r); err != nil { return err } @@ -96,9 +96,7 @@ func (p *storage) Protect(ctx context.Context, txn *client.Txn, r *ptpb.Record) return nil } -func (p *storage) GetRecord( - ctx context.Context, txn *client.Txn, id uuid.UUID, -) (*ptpb.Record, error) { +func (p *storage) GetRecord(ctx context.Context, txn *kv.Txn, id uuid.UUID) (*ptpb.Record, error) { if txn == nil { return nil, errNoTxn } @@ -118,7 +116,7 @@ func (p *storage) GetRecord( return &r, nil } -func (p *storage) MarkVerified(ctx context.Context, txn *client.Txn, id uuid.UUID) error { +func (p *storage) MarkVerified(ctx context.Context, txn *kv.Txn, id uuid.UUID) error { if txn == nil { return errNoTxn } @@ -134,7 +132,7 @@ func (p *storage) MarkVerified(ctx context.Context, txn *client.Txn, id uuid.UUI return nil } -func (p *storage) Release(ctx context.Context, txn *client.Txn, id uuid.UUID) error { +func (p *storage) Release(ctx context.Context, txn *kv.Txn, id uuid.UUID) error { if txn == nil { return errNoTxn } @@ -150,7 +148,7 @@ func (p *storage) Release(ctx context.Context, txn *client.Txn, id uuid.UUID) er return nil } -func (p *storage) GetMetadata(ctx context.Context, txn *client.Txn) (ptpb.Metadata, error) { +func (p *storage) GetMetadata(ctx context.Context, txn *kv.Txn) (ptpb.Metadata, error) { if txn == nil { return ptpb.Metadata{}, errNoTxn } @@ -168,7 +166,7 @@ func (p *storage) GetMetadata(ctx context.Context, txn *client.Txn) (ptpb.Metada }, nil } -func (p *storage) GetState(ctx context.Context, txn *client.Txn) (ptpb.State, error) { +func (p *storage) GetState(ctx context.Context, txn *kv.Txn) (ptpb.State, error) { if txn == nil { return ptpb.State{}, errNoTxn } @@ -186,7 +184,7 @@ func (p *storage) GetState(ctx context.Context, txn *client.Txn) (ptpb.State, er }, nil } -func (p *storage) getRecords(ctx context.Context, txn *client.Txn) ([]ptpb.Record, error) { +func (p *storage) getRecords(ctx context.Context, txn *kv.Txn) ([]ptpb.Record, error) { rows, err := p.ex.QueryEx(ctx, "protectedts-GetRecords", txn, sqlbase.InternalExecutorSessionDataOverride{User: security.NodeUser}, getRecordsQuery) diff --git a/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go b/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go index 040d0f6890e0..7aece29babe4 100644 --- a/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go +++ b/pkg/kv/kvserver/protectedts/ptstorage/storage_test.go @@ -23,8 +23,8 
@@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptstorage" @@ -70,7 +70,7 @@ var testCases = []testCase{ ops: []op{ funcOp(func(ctx context.Context, t *testing.T, tCtx *testContext) { rec := newRecord(hlc.Timestamp{}, "", nil, tableSpan(42)) - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return tCtx.pts.Protect(ctx, txn, &rec) }) require.Regexp(t, "invalid zero value timestamp", err.Error()) @@ -83,7 +83,7 @@ var testCases = []testCase{ funcOp(func(ctx context.Context, t *testing.T, tCtx *testContext) { rec := newRecord(tCtx.tc.Server(0).Clock().Now(), "", nil, tableSpan(42)) rec.Verified = true - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return tCtx.pts.Protect(ctx, txn, &rec) }) require.Regexp(t, "cannot create a verified record", err.Error()) @@ -97,7 +97,7 @@ var testCases = []testCase{ funcOp(func(ctx context.Context, t *testing.T, tCtx *testContext) { rec := newRecord(tCtx.tc.Server(0).Clock().Now(), "", nil, tableSpan(42)) rec.ID = pickOneRecord(tCtx) - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return tCtx.pts.Protect(ctx, txn, &rec) }) require.EqualError(t, err, protectedts.ErrExists.Error()) @@ -161,7 +161,7 @@ var testCases = []testCase{ ops: []op{ funcOp(func(ctx context.Context, t *testing.T, tCtx *testContext) { var rec *ptpb.Record - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { rec, err = tCtx.pts.GetRecord(ctx, txn, randomID(tCtx)) return err }) @@ -216,7 +216,7 @@ var testCases = []testCase{ type testContext struct { pts protectedts.Storage tc *testcluster.TestCluster - db *client.DB + db *kv.DB state ptpb.State } @@ -238,7 +238,7 @@ type releaseOp struct { func (r releaseOp) run(ctx context.Context, t *testing.T, tCtx *testContext) { id := r.idFunc(tCtx) - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return tCtx.pts.Release(ctx, txn, id) }) if !testutils.IsError(err, r.expErr) { @@ -269,7 +269,7 @@ type markVerifiedOp struct { func (mv markVerifiedOp) run(ctx context.Context, t *testing.T, tCtx *testContext) { id := mv.idFunc(tCtx) - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return tCtx.pts.MarkVerified(ctx, txn, id) }) if !testutils.IsError(err, mv.expErr) { @@ -296,7 +296,7 @@ func (p protectOp) run(ctx context.Context, t *testing.T, tCtx *testContext) { if p.idFunc != nil { rec.ID = p.idFunc(tCtx) } - err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := tCtx.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return tCtx.pts.Protect(ctx, txn, &rec) }) if !testutils.IsError(err, p.expErr) { @@ -339,12 +339,12 @@ func (test testCase) run(t *testing.T) { } 
verify := func(t *testing.T) { var state ptpb.State - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { state, err = pts.GetState(ctx, txn) return err })) var md ptpb.Metadata - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { md, err = pts.GetMetadata(ctx, txn) return err })) @@ -352,7 +352,7 @@ func (test testCase) run(t *testing.T) { require.EqualValues(t, tCtx.state.Metadata, md) for _, r := range tCtx.state.Records { var rec *ptpb.Record - require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + require.NoError(t, db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { rec, err = pts.GetRecord(ctx, txn, r.ID) return err })) @@ -436,7 +436,7 @@ func TestCorruptData(t *testing.T) { s.InternalExecutor().(*sql.InternalExecutor)) rec := newRecord(s.Clock().Now(), "foo", []byte("bar"), tableSpan(42)) - require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return pts.Protect(ctx, txn, &rec) })) ie := tc.Server(0).InternalExecutor().(sqlutil.InternalExecutor) @@ -455,12 +455,12 @@ func TestCorruptData(t *testing.T) { var got *ptpb.Record msg := regexp.MustCompile("failed to unmarshal spans for " + rec.ID.String() + ": ") require.Regexp(t, msg, - s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { got, err = pts.GetRecord(ctx, txn, rec.ID) return err }).Error()) require.Nil(t, got) - require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { _, err = pts.GetState(ctx, txn) return err })) @@ -481,7 +481,7 @@ func TestCorruptData(t *testing.T) { s.InternalExecutor().(*sql.InternalExecutor)) rec := newRecord(s.Clock().Now(), "foo", []byte("bar"), tableSpan(42)) - require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return pts.Protect(ctx, txn, &rec) })) @@ -505,12 +505,12 @@ func TestCorruptData(t *testing.T) { msg := regexp.MustCompile("failed to parse timestamp for " + rec.ID.String() + ": logical part has too many digits") require.Regexp(t, msg, - s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { got, err = pts.GetRecord(ctx, txn, rec.ID) return err })) require.Nil(t, got) - require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) (err error) { + require.NoError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) (err error) { _, err = pts.GetState(ctx, txn) return err })) @@ -541,24 +541,24 @@ func TestErrorsFromSQL(t *testing.T) { return errors.New("boom") }) rec := newRecord(s.Clock().Now(), "foo", []byte("bar"), tableSpan(42)) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return pts.Protect(ctx, txn, &rec) }), fmt.Sprintf("failed to write record %v: boom", rec.ID)) - require.EqualError(t, s.DB().Txn(ctx, func(ctx 
context.Context, txn *client.Txn) error { + require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { _, err := pts.GetRecord(ctx, txn, rec.ID) return err }), fmt.Sprintf("failed to read record %v: boom", rec.ID)) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return pts.MarkVerified(ctx, txn, rec.ID) }), fmt.Sprintf("failed to mark record %v as verified: boom", rec.ID)) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return pts.Release(ctx, txn, rec.ID) }), fmt.Sprintf("failed to release record %v: boom", rec.ID)) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { _, err := pts.GetMetadata(ctx, txn) return err }), "failed to read metadata: boom") - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { _, err := pts.GetState(ctx, txn) return err }), "failed to read metadata: boom") @@ -573,7 +573,7 @@ func TestErrorsFromSQL(t *testing.T) { } return errors.New("boom") }) - require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.EqualError(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { _, err := pts.GetState(ctx, txn) return err }), "failed to read records: boom") @@ -592,7 +592,7 @@ type wrappedInternalExecutor struct { var _ sqlutil.InternalExecutor = &wrappedInternalExecutor{} func (ie *wrappedInternalExecutor) Exec( - ctx context.Context, opName string, txn *client.Txn, statement string, params ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, statement string, params ...interface{}, ) (int, error) { panic("unimplemented") } @@ -600,7 +600,7 @@ func (ie *wrappedInternalExecutor) Exec( func (ie *wrappedInternalExecutor) ExecEx( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, o sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -611,7 +611,7 @@ func (ie *wrappedInternalExecutor) ExecEx( func (ie *wrappedInternalExecutor) QueryEx( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, session sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -627,7 +627,7 @@ func (ie *wrappedInternalExecutor) QueryEx( func (ie *wrappedInternalExecutor) QueryWithCols( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, o sqlbase.InternalExecutorSessionDataOverride, statement string, qargs ...interface{}, @@ -638,7 +638,7 @@ func (ie *wrappedInternalExecutor) QueryWithCols( func (ie *wrappedInternalExecutor) QueryRowEx( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, session sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -652,13 +652,13 @@ func (ie *wrappedInternalExecutor) QueryRowEx( } func (ie *wrappedInternalExecutor) Query( - ctx context.Context, opName string, txn *client.Txn, statement string, params ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, statement string, params ...interface{}, ) ([]tree.Datums, error) { panic("not implemented") } func (ie *wrappedInternalExecutor) QueryRow( - ctx 
context.Context, opName string, txn *client.Txn, statement string, qargs ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, statement string, qargs ...interface{}, ) (tree.Datums, error) { panic("not implemented") } diff --git a/pkg/kv/kvserver/protectedts/ptstorage/storage_with_database.go b/pkg/kv/kvserver/protectedts/ptstorage/storage_with_database.go index f2ba46fb7b24..017947a637f9 100644 --- a/pkg/kv/kvserver/protectedts/ptstorage/storage_with_database.go +++ b/pkg/kv/kvserver/protectedts/ptstorage/storage_with_database.go @@ -13,7 +13,7 @@ package ptstorage import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/util/uuid" @@ -21,18 +21,18 @@ import ( // WithDatabase wraps s such that any calls made with a nil *Txn will be wrapped // in a call to db.Txn. This is often convenient in testing. -func WithDatabase(s protectedts.Storage, db *client.DB) protectedts.Storage { +func WithDatabase(s protectedts.Storage, db *kv.DB) protectedts.Storage { return &storageWithDatabase{s: s, db: db} } type storageWithDatabase struct { - db *client.DB + db *kv.DB s protectedts.Storage } -func (s *storageWithDatabase) Protect(ctx context.Context, txn *client.Txn, r *ptpb.Record) error { +func (s *storageWithDatabase) Protect(ctx context.Context, txn *kv.Txn, r *ptpb.Record) error { if txn == nil { - return s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return s.s.Protect(ctx, txn, r) }) } @@ -40,10 +40,10 @@ func (s *storageWithDatabase) Protect(ctx context.Context, txn *client.Txn, r *p } func (s *storageWithDatabase) GetRecord( - ctx context.Context, txn *client.Txn, id uuid.UUID, + ctx context.Context, txn *kv.Txn, id uuid.UUID, ) (r *ptpb.Record, err error) { if txn == nil { - err = s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { r, err = s.s.GetRecord(ctx, txn, id) return err }) @@ -52,20 +52,18 @@ func (s *storageWithDatabase) GetRecord( return s.s.GetRecord(ctx, txn, id) } -func (s *storageWithDatabase) MarkVerified( - ctx context.Context, txn *client.Txn, id uuid.UUID, -) error { +func (s *storageWithDatabase) MarkVerified(ctx context.Context, txn *kv.Txn, id uuid.UUID) error { if txn == nil { - return s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return s.s.Release(ctx, txn, id) }) } return s.s.Release(ctx, txn, id) } -func (s *storageWithDatabase) Release(ctx context.Context, txn *client.Txn, id uuid.UUID) error { +func (s *storageWithDatabase) Release(ctx context.Context, txn *kv.Txn, id uuid.UUID) error { if txn == nil { - return s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return s.s.Release(ctx, txn, id) }) } @@ -73,10 +71,10 @@ func (s *storageWithDatabase) Release(ctx context.Context, txn *client.Txn, id u } func (s *storageWithDatabase) GetMetadata( - ctx context.Context, txn *client.Txn, + ctx context.Context, txn *kv.Txn, ) (md ptpb.Metadata, err error) { if txn == nil { - err = s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = s.db.Txn(ctx, func(ctx context.Context, 
txn *kv.Txn) error { md, err = s.s.GetMetadata(ctx, txn) return err }) @@ -86,10 +84,10 @@ func (s *storageWithDatabase) GetMetadata( } func (s *storageWithDatabase) GetState( - ctx context.Context, txn *client.Txn, + ctx context.Context, txn *kv.Txn, ) (state ptpb.State, err error) { if txn == nil { - err = s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { state, err = s.s.GetState(ctx, txn) return err }) diff --git a/pkg/kv/kvserver/protectedts/ptverifier/verifier.go b/pkg/kv/kvserver/protectedts/ptverifier/verifier.go index 52c171e47b92..3969f70a77ee 100644 --- a/pkg/kv/kvserver/protectedts/ptverifier/verifier.go +++ b/pkg/kv/kvserver/protectedts/ptverifier/verifier.go @@ -13,7 +13,7 @@ package ptverifier import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -24,12 +24,12 @@ import ( // verifier implements protectedts.Verifier. type verifier struct { - db *client.DB + db *kv.DB s protectedts.Storage } // New returns a new Verifier. -func New(db *client.DB, s protectedts.Storage) protectedts.Verifier { +func New(db *kv.DB, s protectedts.Storage) protectedts.Verifier { return &verifier{db: db, s: s} } @@ -57,7 +57,7 @@ func (v *verifier) Verify(ctx context.Context, id uuid.UUID) error { return err } // Mark the record as verified. - return errors.Wrapf(v.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return errors.Wrapf(v.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return v.s.MarkVerified(ctx, txn, id) }), "failed to mark %v as verified", id) } @@ -65,9 +65,9 @@ func (v *verifier) Verify(ctx context.Context, id uuid.UUID) error { // getRecordWithTimestamp fetches the record with the provided id and returns // the hlc timestamp at which that read occurred. func getRecordWithTimestamp( - ctx context.Context, s protectedts.Storage, db *client.DB, id uuid.UUID, + ctx context.Context, s protectedts.Storage, db *kv.DB, id uuid.UUID, ) (r *ptpb.Record, readAt hlc.Timestamp, err error) { - if err = db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err = db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { r, err = s.GetRecord(ctx, txn, id) readAt = txn.ReadTimestamp() return err @@ -77,10 +77,10 @@ func getRecordWithTimestamp( return r, readAt, nil } -func makeVerificationBatch(r *ptpb.Record, aliveAt hlc.Timestamp) client.Batch { +func makeVerificationBatch(r *ptpb.Record, aliveAt hlc.Timestamp) kv.Batch { // Need to perform validation, build a batch and run it. 
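// Editor's note: illustrative sketch only, not part of this patch. From a caller's
// point of view the verifier keeps its old shape and only the DB type changes; db is
// assumed to be a *kv.DB, pts a protectedts.Storage, and recordID a uuid.UUID:
ptv := ptverifier.New(db, pts)
if err := ptv.Verify(ctx, recordID); err != nil {
	return errors.Wrapf(err, "failed to verify protected timestamp record %s", recordID)
}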
mergedSpans, _ := roachpb.MergeSpans(r.Spans) - var b client.Batch + var b kv.Batch for _, s := range mergedSpans { var req roachpb.AdminVerifyProtectedTimestampRequest req.RecordAliveAt = aliveAt @@ -93,7 +93,7 @@ func makeVerificationBatch(r *ptpb.Record, aliveAt hlc.Timestamp) client.Batch { return b } -func parseResponse(b *client.Batch, r *ptpb.Record) error { +func parseResponse(b *kv.Batch, r *ptpb.Record) error { rawResponse := b.RawResponse() var failed []roachpb.RangeDescriptor for _, r := range rawResponse.Responses { diff --git a/pkg/kv/kvserver/protectedts/ptverifier/verifier_test.go b/pkg/kv/kvserver/protectedts/ptverifier/verifier_test.go index 60e0689f98dc..f3a785aba3a2 100644 --- a/pkg/kv/kvserver/protectedts/ptverifier/verifier_test.go +++ b/pkg/kv/kvserver/protectedts/ptverifier/verifier_test.go @@ -18,8 +18,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts/ptpb" @@ -45,7 +45,7 @@ func TestVerifier(t *testing.T) { s := tc.Server(0) var senderFunc atomic.Value - senderFunc.Store(client.SenderFunc(nil)) + senderFunc.Store(kv.SenderFunc(nil)) ds := s.DistSenderI().(*kvcoord.DistSender) tsf := kvcoord.NewTxnCoordSenderFactory( kvcoord.TxnCoordSenderFactoryConfig{ @@ -55,8 +55,8 @@ func TestVerifier(t *testing.T) { Clock: s.Clock(), Stopper: s.Stopper(), }, - client.SenderFunc(func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { - if f := senderFunc.Load().(client.SenderFunc); f != nil { + kv.SenderFunc(func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + if f := senderFunc.Load().(kv.SenderFunc); f != nil { return f(ctx, ba) } return ds.Send(ctx, ba) @@ -65,7 +65,7 @@ func TestVerifier(t *testing.T) { pts := ptstorage.New(s.ClusterSettings(), s.InternalExecutor().(sqlutil.InternalExecutor)) withDB := ptstorage.WithDatabase(pts, s.DB()) - db := client.NewDB(s.DB().AmbientContext, tsf, s.Clock()) + db := kv.NewDB(s.DB().AmbientContext, tsf, s.Clock()) ptv := ptverifier.New(db, pts) makeTableSpan := func(tableID uint32) roachpb.Span { k := roachpb.Key(keys.MakeTablePrefix(tableID)) @@ -83,7 +83,7 @@ func TestVerifier(t *testing.T) { Mode: ptpb.PROTECT_AFTER, Spans: spans, } - require.Nil(t, s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.Nil(t, s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return pts.Protect(ctx, txn, &r) })) return &r @@ -112,7 +112,7 @@ func TestVerifier(t *testing.T) { test: func(t *testing.T) { defer senderFunc.Store(senderFunc.Load()) r := createRecord(t, 42) - senderFunc.Store(client.SenderFunc(func( + senderFunc.Store(kv.SenderFunc(func( ctx context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.AdminVerifyProtectedTimestamp); ok { @@ -130,7 +130,7 @@ func TestVerifier(t *testing.T) { test: func(t *testing.T) { defer senderFunc.Store(senderFunc.Load()) r := createRecord(t, 42) - senderFunc.Store(client.SenderFunc(func( + senderFunc.Store(kv.SenderFunc(func( ctx context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.AdminVerifyProtectedTimestamp); ok { @@ -156,7 +156,7 
@@ func TestVerifier(t *testing.T) { test: func(t *testing.T) { defer senderFunc.Store(senderFunc.Load()) r := createRecord(t, 42, 12) - senderFunc.Store(client.SenderFunc(func( + senderFunc.Store(kv.SenderFunc(func( ctx context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.AdminVerifyProtectedTimestamp); ok { @@ -189,7 +189,7 @@ func TestVerifier(t *testing.T) { test: func(t *testing.T) { defer senderFunc.Store(senderFunc.Load()) r := createRecord(t, 42) - senderFunc.Store(client.SenderFunc(func( + senderFunc.Store(kv.SenderFunc(func( ctx context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.AdminVerifyProtectedTimestamp); ok { @@ -203,7 +203,7 @@ func TestVerifier(t *testing.T) { ensureVerified(t, r.ID, true) // Show that we don't send again once we've already verified. sawVerification := false - senderFunc.Store(client.SenderFunc(func( + senderFunc.Store(kv.SenderFunc(func( ctx context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.AdminVerifyProtectedTimestamp); ok { diff --git a/pkg/kv/kvserver/raft_log_queue.go b/pkg/kv/kvserver/raft_log_queue.go index f9bdb89a99cd..ecc261a58971 100644 --- a/pkg/kv/kvserver/raft_log_queue.go +++ b/pkg/kv/kvserver/raft_log_queue.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -58,7 +58,7 @@ const ( // truncated by removing unneeded entries. type raftLogQueue struct { *baseQueue - db *client.DB + db *kv.DB logSnapshots util.EveryN } @@ -70,7 +70,7 @@ type raftLogQueue struct { // log short overall and allowing slower followers to catch up before they get // cut off by a truncation and need a snapshot. See newTruncateDecision for // details on this decision making process. 
-func newRaftLogQueue(store *Store, db *client.DB, gossip *gossip.Gossip) *raftLogQueue { +func newRaftLogQueue(store *Store, db *kv.DB, gossip *gossip.Gossip) *raftLogQueue { rlq := &raftLogQueue{ db: db, logSnapshots: util.Every(10 * time.Second), @@ -572,7 +572,7 @@ func (rlq *raftLogQueue) process(ctx context.Context, r *Replica, _ *config.Syst } else { log.VEvent(ctx, 1, decision.String()) } - b := &client.Batch{} + b := &kv.Batch{} b.AddRawRequest(&roachpb.TruncateLogRequest{ RequestHeader: roachpb.RequestHeader{Key: r.Desc().StartKey.AsRawKey()}, Index: decision.NewFirstIndex, diff --git a/pkg/kv/kvserver/raft_log_queue_test.go b/pkg/kv/kvserver/raft_log_queue_test.go index 409476b4dbbe..ac4c535d64f3 100644 --- a/pkg/kv/kvserver/raft_log_queue_test.go +++ b/pkg/kv/kvserver/raft_log_queue_test.go @@ -20,8 +20,8 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -471,7 +471,7 @@ func TestNewTruncateDecision(t *testing.T) { for i := 0; i < RaftLogQueueStaleThreshold+1; i++ { key := roachpb.Key(fmt.Sprintf("key%02d", i)) args := putArgs(key, []byte(fmt.Sprintf("value%02d", i))) - if _, err := client.SendWrapped(context.Background(), store.TestSender(), &args); err != nil { + if _, err := kv.SendWrapped(context.Background(), store.TestSender(), &args); err != nil { t.Fatal(err) } } @@ -592,7 +592,7 @@ func TestProactiveRaftLogTruncate(t *testing.T) { for i := 0; i < c.count; i++ { key := roachpb.Key(fmt.Sprintf("key%02d", i)) args := putArgs(key, []byte(fmt.Sprintf("%s%02d", strings.Repeat("v", c.valueSize), i))) - if _, err := client.SendWrapped(ctx, store.TestSender(), &args); err != nil { + if _, err := kv.SendWrapped(ctx, store.TestSender(), &args); err != nil { t.Fatal(err) } } diff --git a/pkg/kv/kvserver/replica.go b/pkg/kv/kvserver/replica.go index e49ab765fb07..854f664e399b 100644 --- a/pkg/kv/kvserver/replica.go +++ b/pkg/kv/kvserver/replica.go @@ -20,8 +20,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/abortspan" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/ctpb" @@ -519,7 +519,7 @@ type KeyRange interface { var _ KeyRange = &Replica{} -var _ client.Sender = &Replica{} +var _ kv.Sender = &Replica{} // String returns the string representation of the replica using an // inconsistent copy of the range descriptor. Therefore, String does not @@ -637,7 +637,7 @@ func (r *Replica) Clock() *hlc.Clock { } // DB returns the Replica's client DB. -func (r *Replica) DB() *client.DB { +func (r *Replica) DB() *kv.DB { return r.store.DB() } @@ -1210,7 +1210,7 @@ func (r *Replica) maybeWatchForMerge(ctx context.Context) error { // roachpb.PUSH_TOUCH, though it might appear more semantically correct, // returns immediately and causes us to spin hot, whereas // roachpb.PUSH_ABORT efficiently blocks until the transaction completes. 
- b := &client.Batch{} + b := &kv.Batch{} b.Header.Timestamp = r.Clock().Now() b.AddRawRequest(&roachpb.PushTxnRequest{ RequestHeader: roachpb.RequestHeader{Key: intent.Txn.Key}, @@ -1255,7 +1255,7 @@ func (r *Replica) maybeWatchForMerge(ctx context.Context) error { var getRes *roachpb.GetResponse for retry := retry.Start(base.DefaultRetryOptions()); retry.Next(); { metaKey := keys.RangeMetaKey(desc.EndKey) - res, pErr := client.SendWrappedWith(ctx, r.DB().NonTransactionalSender(), roachpb.Header{ + res, pErr := kv.SendWrappedWith(ctx, r.DB().NonTransactionalSender(), roachpb.Header{ // Use READ_UNCOMMITTED to avoid trying to resolve intents, since // resolving those intents might involve sending requests to this // range, and that could deadlock. See the comment on diff --git a/pkg/kv/kvserver/replica_command.go b/pkg/kv/kvserver/replica_command.go index b000c3f6993e..599240449753 100644 --- a/pkg/kv/kvserver/replica_command.go +++ b/pkg/kv/kvserver/replica_command.go @@ -21,8 +21,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagepb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -164,7 +164,7 @@ func setStickyBit(desc *roachpb.RangeDescriptor, expiration hlc.Timestamp) { func splitTxnAttempt( ctx context.Context, store *Store, - txn *client.Txn, + txn *kv.Txn, rightRangeID roachpb.RangeID, splitKey roachpb.RKey, expiration hlc.Timestamp, @@ -241,7 +241,7 @@ func splitTxnAttempt( } func splitTxnStickyUpdateAttempt( - ctx context.Context, txn *client.Txn, desc *roachpb.RangeDescriptor, expiration hlc.Timestamp, + ctx context.Context, txn *kv.Txn, desc *roachpb.RangeDescriptor, expiration hlc.Timestamp, ) error { _, dbDescValue, err := conditionalGetDescValueFromDB(ctx, txn, desc.StartKey, checkDescsEqual(desc)) if err != nil { @@ -364,7 +364,7 @@ func (r *Replica) adminSplitWithDescriptor( // Even if the range is already split, we should still update the sticky // bit if it has a later expiration time. 
if desc.GetStickyBit().Less(args.ExpirationTime) { - err := r.store.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := r.store.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return splitTxnStickyUpdateAttempt(ctx, txn, desc, args.ExpirationTime) }) // The ConditionFailedError can occur because the descriptors acting as @@ -397,7 +397,7 @@ func (r *Replica) adminSplitWithDescriptor( log.Infof(ctx, "initiating a split of this range at key %s [r%d] (%s)%s", splitKey.StringWithDirs(nil /* valDirs */, 50 /* maxLen */), rightRangeID, reason, extra) - if err := r.store.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := r.store.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return splitTxnAttempt(ctx, r.store, txn, rightRangeID, splitKey, args.ExpirationTime, desc) }); err != nil { // The ConditionFailedError can occur because the descriptors acting @@ -449,7 +449,7 @@ func (r *Replica) adminUnsplitWithDescriptor( return reply, nil } - if err := r.store.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := r.store.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { _, dbDescValue, err := conditionalGetDescValueFromDB(ctx, txn, desc.StartKey, checkDescsEqual(desc)) if err != nil { return err @@ -566,7 +566,7 @@ func (r *Replica) AdminMerge( ) (roachpb.AdminMergeResponse, *roachpb.Error) { var reply roachpb.AdminMergeResponse - runMergeTxn := func(txn *client.Txn) error { + runMergeTxn := func(txn *kv.Txn) error { log.Event(ctx, "merge txn begins") txn.SetDebugName(mergeTxnName) @@ -715,7 +715,7 @@ func (r *Replica) AdminMerge( // a consistent view of the data from the right-hand range. If the merge // commits, we'll write this data to the left-hand range in the merge // trigger. - br, pErr := client.SendWrapped(ctx, r.store.DB().NonTransactionalSender(), + br, pErr := kv.SendWrapped(ctx, r.store.DB().NonTransactionalSender(), &roachpb.SubsumeRequest{ RequestHeader: roachpb.RequestHeader{Key: rightDesc.StartKey.AsRawKey()}, LeftDesc: *origLeftDesc, @@ -767,7 +767,7 @@ func (r *Replica) AdminMerge( // Note that client.DB.Txn performs retries using the same transaction, so we // have to use our own retry loop. for { - txn := client.NewTxn(ctx, r.store.DB(), r.NodeID()) + txn := kv.NewTxn(ctx, r.store.DB(), r.NodeID()) err := runMergeTxn(txn) if err != nil { txn.CleanupOnError(ctx, err) @@ -1581,7 +1581,7 @@ func execChangeReplicasTxn( return checkDescsEqual(referenceDesc)(kvDesc) } - if err := store.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := store.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { log.Event(ctx, "attempting txn") txn.SetDebugName(replicaChangeTxnName) desc, dbDescValue, err := conditionalGetDescValueFromDB(ctx, txn, referenceDesc.StartKey, check) @@ -1956,7 +1956,7 @@ func checkDescsEqual(desc *roachpb.RangeDescriptor) func(*roachpb.RangeDescripto // the same thing, but also correctly handles proto equality. See #38308. func conditionalGetDescValueFromDB( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, startKey roachpb.RKey, check func(*roachpb.RangeDescriptor) bool, ) (*roachpb.RangeDescriptor, *roachpb.Value, error) { @@ -1993,7 +1993,7 @@ func conditionalGetDescValueFromDB( // descriptor, a CommitTrigger must be used to update the in-memory // descriptor; it will not automatically be copied from newDesc. 
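// Editor's note: illustrative sketch only, not part of this patch. Callers of the
// helper defined below stage the descriptor update in a transactional batch; only the
// batch type's package changes here. descKey, oldValue, and newDesc are assumed to be
// in scope (oldValue typically comes from conditionalGetDescValueFromDB):
b := txn.NewBatch()
if err := updateRangeDescriptor(b, descKey, oldValue, newDesc); err != nil {
	return err
}
if err := txn.Run(ctx, b); err != nil {
	return err
}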
func updateRangeDescriptor( - b *client.Batch, descKey roachpb.Key, oldValue *roachpb.Value, newDesc *roachpb.RangeDescriptor, + b *kv.Batch, descKey roachpb.Key, oldValue *roachpb.Value, newDesc *roachpb.RangeDescriptor, ) error { // This is subtle: []byte(nil) != interface{}(nil). A []byte(nil) refers to // an empty value. An interface{}(nil) refers to a non-existent value. So @@ -2035,7 +2035,7 @@ func (s *Store) AdminRelocateRange( // with this hack for a few weeks. // // TODO(tbg): remove in 20.1. - ctx = client.ChangeReplicasCanMixAddAndRemoveContext(ctx) + ctx = kv.ChangeReplicasCanMixAddAndRemoveContext(ctx) } // Step 0: Remove everything that's not a full voter so we don't have to think @@ -2124,7 +2124,7 @@ func (s *Store) AdminRelocateRange( // 19.2+ (in which the AdminChangeReplicas RPC was extended to support // mixing additions and removals), don't send such requests but unroll // the ops here, running them one by one; see for details: - _ = client.ChangeReplicasCanMixAddAndRemoveContext + _ = kv.ChangeReplicasCanMixAddAndRemoveContext // Make sure we don't issue anything but singles and swaps before // this migration is gone (for it doesn't support anything else). @@ -2138,7 +2138,7 @@ func (s *Store) AdminRelocateRange( success := true for _, ops := range opss { newDesc, err := s.DB().AdminChangeReplicas( - client.ChangeReplicasCanMixAddAndRemoveContext(ctx), + kv.ChangeReplicasCanMixAddAndRemoveContext(ctx), startKey, rangeDesc, ops, @@ -2309,7 +2309,7 @@ func (s *Store) relocateOne( // that, we need to first move the lease elsewhere. This is not possible // if there is no other replica available at that point, i.e. if the // existing descriptor is a single replica that's being replaced. - var b client.Batch + var b kv.Batch liReq := &roachpb.LeaseInfoRequest{} liReq.Key = desc.StartKey.AsRawKey() b.AddRawRequest(liReq) diff --git a/pkg/kv/kvserver/replica_consistency.go b/pkg/kv/kvserver/replica_consistency.go index b086343f870d..4ef8ffd25927 100644 --- a/pkg/kv/kvserver/replica_consistency.go +++ b/pkg/kv/kvserver/replica_consistency.go @@ -22,8 +22,8 @@ import ( "sync/atomic" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader" @@ -232,7 +232,7 @@ func (r *Replica) CheckConsistency( // so ContainsEstimates should have been strictly positive. var v roachpb.Version - if err := r.store.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := r.store.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return txn.GetProto(ctx, keys.BootstrapVersionKey, &v) }); err != nil { log.Infof(ctx, "while retrieving cluster bootstrap version: %s", err) @@ -258,7 +258,7 @@ func (r *Replica) CheckConsistency( RequestHeader: roachpb.RequestHeader{Key: startKey}, } - var b client.Batch + var b kv.Batch b.AddRawRequest(&req) err := r.store.db.Run(ctx, &b) @@ -338,7 +338,7 @@ func (r *Replica) RunConsistencyCheck( ) ([]ConsistencyCheckResult, error) { // Send a ComputeChecksum which will trigger computation of the checksum on // all replicas. 
- res, pErr := client.SendWrapped(ctx, r.store.db.NonTransactionalSender(), &req) + res, pErr := kv.SendWrapped(ctx, r.store.db.NonTransactionalSender(), &req) if pErr != nil { return nil, pErr.GoError() } diff --git a/pkg/kv/kvserver/replica_eval_context_span.go b/pkg/kv/kvserver/replica_eval_context_span.go index 89ee0d4eb679..c6359e245b81 100644 --- a/pkg/kv/kvserver/replica_eval_context_span.go +++ b/pkg/kv/kvserver/replica_eval_context_span.go @@ -13,8 +13,8 @@ package kvserver import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/abortspan" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency" @@ -70,7 +70,7 @@ func (rec *SpanSetReplicaEvalContext) Clock() *hlc.Clock { } // DB returns the Replica's client DB. -func (rec *SpanSetReplicaEvalContext) DB() *client.DB { +func (rec *SpanSetReplicaEvalContext) DB() *kv.DB { return rec.i.DB() } diff --git a/pkg/kv/kvserver/replica_gc_queue.go b/pkg/kv/kvserver/replica_gc_queue.go index bd6ddfaeccb3..98b2080c91ed 100644 --- a/pkg/kv/kvserver/replica_gc_queue.go +++ b/pkg/kv/kvserver/replica_gc_queue.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -88,11 +88,11 @@ func makeReplicaGCQueueMetrics() ReplicaGCQueueMetrics { type replicaGCQueue struct { *baseQueue metrics ReplicaGCQueueMetrics - db *client.DB + db *kv.DB } // newReplicaGCQueue returns a new instance of replicaGCQueue. -func newReplicaGCQueue(store *Store, db *client.DB, gossip *gossip.Gossip) *replicaGCQueue { +func newReplicaGCQueue(store *Store, db *kv.DB, gossip *gossip.Gossip) *replicaGCQueue { rgcq := &replicaGCQueue{ metrics: makeReplicaGCQueueMetrics(), db: db, @@ -220,7 +220,7 @@ func (rgcq *replicaGCQueue) process( // want to do a consistent read here. This is important when we are // considering one of the metadata ranges: we must not do an inconsistent // lookup in our own copy of the range. 
- rs, _, err := client.RangeLookup(ctx, rgcq.db.NonTransactionalSender(), desc.StartKey.AsRawKey(), + rs, _, err := kv.RangeLookup(ctx, rgcq.db.NonTransactionalSender(), desc.StartKey.AsRawKey(), roachpb.CONSISTENT, 0 /* prefetchNum */, false /* reverse */) if err != nil { return err @@ -323,7 +323,7 @@ func (rgcq *replicaGCQueue) process( leftRepl := repl.store.lookupPrecedingReplica(desc.StartKey) if leftRepl != nil { leftDesc := leftRepl.Desc() - rs, _, err := client.RangeLookup(ctx, rgcq.db.NonTransactionalSender(), leftDesc.StartKey.AsRawKey(), + rs, _, err := kv.RangeLookup(ctx, rgcq.db.NonTransactionalSender(), leftDesc.StartKey.AsRawKey(), roachpb.CONSISTENT, 0 /* prefetchNum */, false /* reverse */) if err != nil { return err diff --git a/pkg/kv/kvserver/replica_rangefeed_test.go b/pkg/kv/kvserver/replica_rangefeed_test.go index c969926108b6..76931b3fa3d9 100644 --- a/pkg/kv/kvserver/replica_rangefeed_test.go +++ b/pkg/kv/kvserver/replica_rangefeed_test.go @@ -16,7 +16,7 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -96,7 +96,7 @@ func TestReplicaRangefeed(t *testing.T) { // Split the range so that the RHS uses epoch-based leases. startKey := []byte("a") splitArgs := adminSplitArgs(startKey) - if _, pErr := client.SendWrapped(ctx, db, splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, db, splitArgs); pErr != nil { t.Fatalf("split saw unexpected error: %v", pErr) } rangeID := mtc.Store(0).LookupReplica(startKey).RangeID @@ -106,7 +106,7 @@ func TestReplicaRangefeed(t *testing.T) { mtc.manualClock.Increment(1) ts1 := mtc.clock.Now() incArgs := incrementArgs(roachpb.Key("b"), 9) - _, pErr := client.SendWrappedWith(ctx, db, roachpb.Header{Timestamp: ts1}, incArgs) + _, pErr := kv.SendWrappedWith(ctx, db, roachpb.Header{Timestamp: ts1}, incArgs) if pErr != nil { t.Fatal(pErr) } @@ -179,7 +179,7 @@ func TestReplicaRangefeed(t *testing.T) { mtc.manualClock.Increment(1) ts2 := mtc.clock.Now() pArgs := putArgs(roachpb.Key("c"), []byte("val2")) - _, pErr = client.SendWrappedWith(ctx, db, roachpb.Header{Timestamp: ts2}, pArgs) + _, pErr = kv.SendWrappedWith(ctx, db, roachpb.Header{Timestamp: ts2}, pArgs) if pErr != nil { t.Fatal(pErr) } @@ -187,7 +187,7 @@ func TestReplicaRangefeed(t *testing.T) { // Insert a second key transactionally. mtc.manualClock.Increment(1) ts3 := mtc.clock.Now() - if err := mtc.dbs[1].Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := mtc.dbs[1].Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { txn.SetFixedTimestamp(ctx, ts3) return txn.Put(ctx, roachpb.Key("m"), []byte("val3")) }); err != nil { @@ -201,7 +201,7 @@ func TestReplicaRangefeed(t *testing.T) { // Update the originally incremented key non-transactionally. mtc.manualClock.Increment(1) ts4 := mtc.clock.Now() - _, pErr = client.SendWrappedWith(ctx, db, roachpb.Header{Timestamp: ts4}, incArgs) + _, pErr = kv.SendWrappedWith(ctx, db, roachpb.Header{Timestamp: ts4}, incArgs) if pErr != nil { t.Fatal(pErr) } @@ -209,7 +209,7 @@ func TestReplicaRangefeed(t *testing.T) { // Update the originally incremented key transactionally. 
mtc.manualClock.Increment(1) ts5 := mtc.clock.Now() - if err := mtc.dbs[1].Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := mtc.dbs[1].Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { txn.SetFixedTimestamp(ctx, ts5) _, err := txn.Inc(ctx, incArgs.Key, 7) return err @@ -361,7 +361,7 @@ func TestReplicaRangefeedRetryErrors(t *testing.T) { // Split the range so that the RHS uses epoch-based leases. splitArgs := adminSplitArgs(startKey) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], splitArgs); pErr != nil { subT.Fatalf("split saw unexpected error: %v", pErr) } rangeID := mtc.Store(0).LookupReplica(startKey).RangeID @@ -369,7 +369,7 @@ func TestReplicaRangefeedRetryErrors(t *testing.T) { // Write to the RHS of the split and wait for all replicas to process it. // This ensures that all replicas have seen the split before we move on. incArgs := incrementArgs(roachpb.Key("a"), 9) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } mtc.waitForValues(roachpb.Key("a"), []int64{9, 9, 9}) @@ -482,7 +482,7 @@ func TestReplicaRangefeedRetryErrors(t *testing.T) { // Split the range. args := adminSplitArgs([]byte("m")) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], args); pErr != nil { t.Fatalf("split saw unexpected error: %v", pErr) } @@ -497,7 +497,7 @@ func TestReplicaRangefeedRetryErrors(t *testing.T) { // Split the range. splitKey := []byte("m") splitArgs := adminSplitArgs(splitKey) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], splitArgs); pErr != nil { t.Fatalf("split saw unexpected error: %v", pErr) } rightRangeID := mtc.Store(0).LookupReplica(splitKey).RangeID @@ -505,7 +505,7 @@ func TestReplicaRangefeedRetryErrors(t *testing.T) { // Write to the RHS of the split and wait for all replicas to process it. // This ensures that all replicas have seen the split before we move on. incArgs := incrementArgs(roachpb.Key("n"), 9) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], incArgs); pErr != nil { t.Fatal(pErr) } mtc.waitForValues(roachpb.Key("n"), []int64{9, 9, 9}) @@ -548,7 +548,7 @@ func TestReplicaRangefeedRetryErrors(t *testing.T) { // Merge the ranges back together mergeArgs := adminMergeArgs(startKey) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], mergeArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], mergeArgs); pErr != nil { t.Fatalf("merge saw unexpected error: %v", pErr) } @@ -612,7 +612,7 @@ func TestReplicaRangefeedRetryErrors(t *testing.T) { // Perform a write on the range. pArgs := putArgs(roachpb.Key("c"), []byte("val2")) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], pArgs); pErr != nil { t.Fatal(pErr) } @@ -631,7 +631,7 @@ func TestReplicaRangefeedRetryErrors(t *testing.T) { // replica rejoins the rest of the range. 
truncArgs := truncateLogArgs(index+1, rangeID) truncArgs.Key = startKey - if _, err := client.SendWrapped(ctx, mtc.distSenders[0], truncArgs); err != nil { + if _, err := kv.SendWrapped(ctx, mtc.distSenders[0], truncArgs); err != nil { t.Fatal(err) } @@ -686,7 +686,7 @@ func TestReplicaRangefeedRetryErrors(t *testing.T) { // Perform a write on the range. pArgs := putArgs(roachpb.Key("c"), []byte("val2")) - if _, pErr := client.SendWrapped(ctx, mtc.distSenders[0], pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, mtc.distSenders[0], pArgs); pErr != nil { t.Fatal(pErr) } diff --git a/pkg/kv/kvserver/replica_sideload_test.go b/pkg/kv/kvserver/replica_sideload_test.go index 9ddefd122dbe..340e435a7e1d 100644 --- a/pkg/kv/kvserver/replica_sideload_test.go +++ b/pkg/kv/kvserver/replica_sideload_test.go @@ -26,7 +26,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftentry" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" @@ -711,7 +711,7 @@ func testRaftSSTableSideloadingProposal(t *testing.T, engineInMem, mockSideloade for i := 0; i < RaftLogQueueStaleThreshold+1; i++ { key := roachpb.Key(fmt.Sprintf("key%02d", i)) args := putArgs(key, []byte(fmt.Sprintf("value%02d", i))) - if _, err := client.SendWrapped(context.Background(), tc.store.TestSender(), &args); err != nil { + if _, err := kv.SendWrapped(context.Background(), tc.store.TestSender(), &args); err != nil { t.Fatal(err) } } @@ -1012,7 +1012,7 @@ func TestRaftSSTableSideloadingTruncation(t *testing.T) { newFirstIndex := indexes[i] + 1 truncateArgs := truncateLogArgs(newFirstIndex, rangeID) log.Eventf(ctx, "truncating to index < %d", newFirstIndex) - if _, pErr := client.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{RangeID: rangeID}, &truncateArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{RangeID: rangeID}, &truncateArgs); pErr != nil { t.Fatal(pErr) } sideloadStrings := fmtSideloaded() diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index fddcb8aac7da..1e626d625720 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -32,8 +32,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/apply" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result" @@ -237,7 +237,7 @@ func (tc *testContext) StartWithStoreConfigAndVersion( // circular dependency between the test sender and the store. The actual // store will be passed to the sender after it is created and bootstrapped. 
factory := &testSenderFactory{} - cfg.DB = client.NewDB(cfg.AmbientCtx, factory, cfg.Clock) + cfg.DB = kv.NewDB(cfg.AmbientCtx, factory, cfg.Clock) if err := InitEngine(ctx, tc.engine, roachpb.StoreIdent{ ClusterID: uuid.MakeV4(), @@ -311,8 +311,8 @@ func (tc *testContext) StartWithStoreConfigAndVersion( } } -func (tc *testContext) Sender() client.Sender { - return client.Wrap(tc.repl, func(ba roachpb.BatchRequest) roachpb.BatchRequest { +func (tc *testContext) Sender() kv.Sender { + return kv.Wrap(tc.repl, func(ba roachpb.BatchRequest) roachpb.BatchRequest { if ba.RangeID == 0 { ba.RangeID = 1 } @@ -331,7 +331,7 @@ func (tc *testContext) Sender() client.Sender { func (tc *testContext) SendWrappedWith( h roachpb.Header, args roachpb.Request, ) (roachpb.Response, *roachpb.Error) { - return client.SendWrappedWith(context.Background(), tc.Sender(), h, args) + return kv.SendWrappedWith(context.Background(), tc.Sender(), h, args) } // SendWrapped is identical to SendWrappedWith with a zero header. @@ -391,7 +391,7 @@ func (tc *testContext) addBogusReplicaToRangeDesc( // Update the "on-disk" replica state, so that it doesn't diverge from what we // have in memory. At the time of this writing, this is not actually required // by the tests using this functionality, but it seems sane to do. - ba := client.Batch{ + ba := kv.Batch{ Header: roachpb.Header{Timestamp: tc.Clock().Now()}, } descKey := keys.RangeDescriptorKey(oldDesc.StartKey) @@ -927,7 +927,7 @@ func TestReplicaRangeBoundsChecking(t *testing.T) { } gArgs := getArgs(roachpb.Key("b")) - _, pErr := client.SendWrappedWith(context.Background(), tc.store, roachpb.Header{ + _, pErr := kv.SendWrappedWith(context.Background(), tc.store, roachpb.Header{ RangeID: 1, }, &gArgs) @@ -1499,7 +1499,7 @@ func TestReplicaNoGossipConfig(t *testing.T) { for i, test := range testCases { assignSeqNumsForReqs(txn, test.req) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), test.h, test.req); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), test.h, test.req); pErr != nil { t.Fatal(pErr) } @@ -1530,7 +1530,7 @@ func TestReplicaNoGossipFromNonLeader(t *testing.T) { req1 := putArgs(key, nil) assignSeqNumsForReqs(txn, &req1) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{ Txn: txn, }, &req1); pErr != nil { t.Fatal(pErr) @@ -3673,7 +3673,7 @@ func TestEndTxnDeadline(t *testing.T) { put := putArgs(key, key) assignSeqNumsForReqs(txn, &put) - if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( context.Background(), tc.Sender(), roachpb.Header{Txn: txn}, &put, ); pErr != nil { t.Fatal(pErr) @@ -3953,7 +3953,7 @@ func TestEndTxnWithMalformedSplitTrigger(t *testing.T) { txn := newTransaction("test", key, 1, tc.Clock()) pArgs := putArgs(key, []byte("only here to make this a rw transaction")) assignSeqNumsForReqs(txn, &pArgs) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{ Txn: txn, }, &pArgs); pErr != nil { t.Fatal(pErr) @@ -4119,7 +4119,7 @@ func TestEndTxnWithPushedTimestamp(t *testing.T) { pusher.Priority = enginepb.MaxTxnPriority // pusher will win put := putArgs(key, []byte("value")) assignSeqNumsForReqs(pushee, &put) - if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( context.Background(), tc.Sender(), 
roachpb.Header{Txn: pushee}, &put, ); pErr != nil { t.Fatal(pErr) @@ -4171,7 +4171,7 @@ func TestEndTxnWithIncrementedEpoch(t *testing.T) { txn := newTransaction("test", key, 1, tc.Clock()) put := putArgs(key, key) assignSeqNumsForReqs(txn, &put) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: txn}, &put); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: txn}, &put); pErr != nil { t.Fatal(pErr) } @@ -4276,7 +4276,7 @@ func TestEndTxnRollbackAbortedTransaction(t *testing.T) { txn := newTransaction("test", key, 1, tc.Clock()) put := putArgs(key, key) assignSeqNumsForReqs(txn, &put) - if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( context.TODO(), tc.Sender(), roachpb.Header{Txn: txn}, &put, ); pErr != nil { t.Fatal(pErr) @@ -4620,7 +4620,7 @@ func TestEndTxnLocalGC(t *testing.T) { txn := newTransaction("test", key, 1, tc.Clock()) put := putArgs(putKey, key) assignSeqNumsForReqs(txn, &put) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: txn}, &put); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: txn}, &put); pErr != nil { t.Fatal(pErr) } putKey = putKey.Next() // for the next iteration @@ -4658,7 +4658,7 @@ func setupResolutionTest( pArgs := putArgs(key, []byte("value")) h := roachpb.Header{Txn: txn} assignSeqNumsForReqs(txn, &pArgs) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), h, &pArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), h, &pArgs); pErr != nil { t.Fatal(pErr) } @@ -5019,7 +5019,7 @@ func TestAbortSpanPoisonOnResolve(t *testing.T) { RequestHeader: roachpb.RequestHeader{Key: k}, Increment: 123, } assignSeqNumsForReqs(actor, incArgs) - reply, pErr := client.SendWrappedWith(context.Background(), tc.store, roachpb.Header{ + reply, pErr := kv.SendWrappedWith(context.Background(), tc.store, roachpb.Header{ Txn: actor, RangeID: 1, }, incArgs) @@ -5032,7 +5032,7 @@ func TestAbortSpanPoisonOnResolve(t *testing.T) { get := func(actor *roachpb.Transaction, k roachpb.Key) *roachpb.Error { gArgs := getArgs(k) assignSeqNumsForReqs(actor, &gArgs) - _, pErr := client.SendWrappedWith(context.Background(), tc.store, roachpb.Header{ + _, pErr := kv.SendWrappedWith(context.Background(), tc.store, roachpb.Header{ Txn: actor, RangeID: 1, }, &gArgs) @@ -5204,7 +5204,7 @@ func TestPushTxnAlreadyCommittedOrAborted(t *testing.T) { // Begin the pushee's transaction. put := putArgs(key, key) assignSeqNumsForReqs(pushee, &put) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { t.Fatal(pErr) } // End the pushee's transaction. 
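For orientation between hunks: the test changes above and below only swap the helper's package, so this sketch assumes kv.SendWrapped and kv.SendWrappedWith keep the call shape the old client helpers had at these call sites (wrap one request in a BatchRequest, attach the header, unwrap the single response). The function name, sender, key, and value below are illustrative placeholders, not code from this patch.

package example // illustrative sketch, not part of the diff

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// putThroughSender is a hypothetical helper showing the kv.SendWrappedWith
// shape used throughout these tests: a single Put is wrapped in a batch, the
// transaction rides in the header, and the per-request error comes back as a
// *roachpb.Error.
func putThroughSender(ctx context.Context, sender kv.Sender, txn *roachpb.Transaction) error {
	put := roachpb.NewPut(roachpb.Key("k"), roachpb.MakeValueFromString("v"))
	if _, pErr := kv.SendWrappedWith(ctx, sender, roachpb.Header{Txn: txn}, put); pErr != nil {
		return pErr.GoError()
	}
	return nil
}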
@@ -5276,7 +5276,7 @@ func TestPushTxnUpgradeExistingTxn(t *testing.T) { pushee.LastHeartbeat = test.startTS pushee.ReadTimestamp = test.startTS hb, hbH := heartbeatArgs(pushee, pushee.WriteTimestamp) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), hbH, &hb); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), hbH, &hb); pErr != nil { t.Fatal(pErr) } @@ -5331,7 +5331,7 @@ func TestPushTxnQueryPusheeHasNewerVersion(t *testing.T) { put := putArgs(key, key) assignSeqNumsForReqs(pushee, &put) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { t.Fatal(pErr) } @@ -5459,7 +5459,7 @@ func TestPushTxnHeartbeatTimeout(t *testing.T) { // Establish "start" of existing pushee's txn via HeartbeatTxn request // if the test case wants an existing transaction record. hb, hbH := heartbeatArgs(pushee, pushee.WriteTimestamp) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), hbH, &hb); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), hbH, &hb); pErr != nil { t.Fatalf("%d: %s", i, pErr) } case roachpb.STAGING: @@ -5467,7 +5467,7 @@ func TestPushTxnHeartbeatTimeout(t *testing.T) { et.InFlightWrites = []roachpb.SequencedWrite{ {Key: key, Sequence: 1}, } - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), etH, &et); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), etH, &et); pErr != nil { t.Fatalf("%d: %s", i, pErr) } default: @@ -5603,7 +5603,7 @@ func TestPushTxnPriorities(t *testing.T) { put := putArgs(key, key) assignSeqNumsForReqs(pushee, &put) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { t.Fatal(pErr) } // Now, attempt to push the transaction with intent epoch set appropriately. @@ -5646,7 +5646,7 @@ func TestPushTxnPushTimestamp(t *testing.T) { key := roachpb.Key("a") put := putArgs(key, key) assignSeqNumsForReqs(pushee, &put) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { t.Fatal(pErr) } @@ -5688,7 +5688,7 @@ func TestPushTxnPushTimestampAlreadyPushed(t *testing.T) { key := roachpb.Key("a") put := putArgs(key, key) assignSeqNumsForReqs(pushee, &put) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { t.Fatal(pErr) } @@ -5738,7 +5738,7 @@ func TestPushTxnSerializableRestart(t *testing.T) { // Write to a key. put := putArgs(key, []byte("foo")) assignSeqNumsForReqs(pushee, &put) - resp, pErr := client.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{Txn: pushee}, &put) + resp, pErr := kv.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{Txn: pushee}, &put) if pErr != nil { t.Fatal(pErr) } @@ -6329,7 +6329,7 @@ func TestReplicaDanglingMetaIntent(t *testing.T) { key := roachpb.Key("a") // Get original meta2 descriptor. 
- rs, _, err := client.RangeLookup(ctx, tc.Sender(), key, roachpb.READ_UNCOMMITTED, 0, reverse) + rs, _, err := kv.RangeLookup(ctx, tc.Sender(), key, roachpb.READ_UNCOMMITTED, 0, reverse) if err != nil { t.Fatal(err) } @@ -6354,7 +6354,7 @@ func TestReplicaDanglingMetaIntent(t *testing.T) { // priority). pArgs := putArgs(keys.RangeMetaKey(roachpb.RKey(key)).AsRawKey(), data) assignSeqNumsForReqs(txn, &pArgs) - if _, pErr := client.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{Txn: txn}, &pArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{Txn: txn}, &pArgs); pErr != nil { t.Fatal(pErr) } @@ -6364,7 +6364,7 @@ func TestReplicaDanglingMetaIntent(t *testing.T) { // // Note that 'A' < 'a'. newKey := roachpb.Key{'A'} - rs, _, err = client.RangeLookup(ctx, tc.Sender(), newKey, roachpb.READ_UNCOMMITTED, 0, reverse) + rs, _, err = kv.RangeLookup(ctx, tc.Sender(), newKey, roachpb.READ_UNCOMMITTED, 0, reverse) if err != nil { t.Fatal(err) } @@ -6379,7 +6379,7 @@ func TestReplicaDanglingMetaIntent(t *testing.T) { } // Switch to consistent lookups, which should run into the intent. - _, _, err = client.RangeLookup(ctx, tc.Sender(), newKey, roachpb.CONSISTENT, 0, reverse) + _, _, err = kv.RangeLookup(ctx, tc.Sender(), newKey, roachpb.CONSISTENT, 0, reverse) if _, ok := err.(*roachpb.WriteIntentError); !ok { t.Fatalf("expected WriteIntentError, not %s", err) } @@ -6459,7 +6459,7 @@ func TestReplicaLookupUseReverseScan(t *testing.T) { // Test reverse RangeLookup scan without intents. for _, c := range testCases { - rs, _, err := client.RangeLookup(ctx, tc.Sender(), roachpb.Key(c.key), + rs, _, err := kv.RangeLookup(ctx, tc.Sender(), roachpb.Key(c.key), roachpb.READ_UNCOMMITTED, 0, true) if err != nil { t.Fatal(err) @@ -6488,7 +6488,7 @@ func TestReplicaLookupUseReverseScan(t *testing.T) { // Test reverse RangeLookup scan with intents. for _, c := range testCases { - rs, _, err := client.RangeLookup(ctx, tc.Sender(), roachpb.Key(c.key), + rs, _, err := kv.RangeLookup(ctx, tc.Sender(), roachpb.Key(c.key), roachpb.READ_UNCOMMITTED, 0, true) if err != nil { t.Fatal(err) @@ -6528,7 +6528,7 @@ func TestRangeLookup(t *testing.T) { } for i, c := range testCases { - rs, _, err := client.RangeLookup(ctx, tc.Sender(), c.key.AsRawKey(), + rs, _, err := kv.RangeLookup(ctx, tc.Sender(), c.key.AsRawKey(), roachpb.CONSISTENT, 0, c.reverse) if err != nil { if c.expected != nil { @@ -6568,7 +6568,7 @@ func TestRequestLeaderEncounterGroupDeleteError(t *testing.T) { gArgs := getArgs(roachpb.Key("a")) // Force the read command request a new lease. 
manual.Set(leaseExpiry(tc.repl)) - _, pErr := client.SendWrappedWith(context.Background(), tc.store, roachpb.Header{ + _, pErr := kv.SendWrappedWith(context.Background(), tc.store, roachpb.Header{ Timestamp: tc.Clock().Now(), RangeID: 1, }, &gArgs) @@ -6755,7 +6755,7 @@ func TestReplicaLoadSystemConfigSpanIntent(t *testing.T) { pushee.Priority = enginepb.MinTxnPriority // low so it can be pushed put := putArgs(key, []byte("foo")) assignSeqNumsForReqs(pushee, &put) - if _, pErr := client.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), tc.Sender(), roachpb.Header{Txn: pushee}, &put); pErr != nil { t.Fatal(pErr) } @@ -6954,7 +6954,7 @@ func TestEntries(t *testing.T) { truncateLogs := func(index int) { truncateArgs := truncateLogArgs(indexes[index], rangeID) - if _, err := client.SendWrappedWith( + if _, err := kv.SendWrappedWith( context.Background(), tc.Sender(), roachpb.Header{RangeID: 1}, @@ -7181,10 +7181,10 @@ func TestGCIncorrectRange(t *testing.T) { ts2 := now.Add(2, 0) ts1Header := roachpb.Header{RangeID: repl2.RangeID, Timestamp: ts1} ts2Header := roachpb.Header{RangeID: repl2.RangeID, Timestamp: ts2} - if _, pErr := client.SendWrappedWith(context.Background(), repl2, ts1Header, &putReq); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), repl2, ts1Header, &putReq); pErr != nil { t.Errorf("unexpected pError on put key request: %s", pErr) } - if _, pErr := client.SendWrappedWith(context.Background(), repl2, ts2Header, &putReq); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), repl2, ts2Header, &putReq); pErr != nil { t.Errorf("unexpected pError on put key request: %s", pErr) } @@ -7193,7 +7193,7 @@ func TestGCIncorrectRange(t *testing.T) { // the request for the incorrect key will be silently dropped. gKey := gcKey(key, ts1) gcReq := gcArgs(repl1.Desc().StartKey, repl1.Desc().EndKey, gKey) - if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( context.Background(), repl1, roachpb.Header{RangeID: 1, Timestamp: tc.Clock().Now()}, @@ -7204,7 +7204,7 @@ func TestGCIncorrectRange(t *testing.T) { // Make sure the key still exists on range 2. getReq := getArgs(key) - if res, pErr := client.SendWrappedWith(context.Background(), repl2, ts1Header, &getReq); pErr != nil { + if res, pErr := kv.SendWrappedWith(context.Background(), repl2, ts1Header, &getReq); pErr != nil { t.Errorf("unexpected pError on get request to correct range: %s", pErr) } else if resVal := res.(*roachpb.GetResponse).Value; resVal == nil { t.Errorf("expected value %s to exists after GC to incorrect range but before GC to correct range, found %v", val, resVal) @@ -7212,7 +7212,7 @@ func TestGCIncorrectRange(t *testing.T) { // Send GC request to range 2 for the same key. gcReq = gcArgs(repl2.Desc().StartKey, repl2.Desc().EndKey, gKey) - if _, pErr := client.SendWrappedWith( + if _, pErr := kv.SendWrappedWith( context.Background(), repl2, roachpb.Header{RangeID: repl2.RangeID, Timestamp: tc.Clock().Now()}, @@ -7222,7 +7222,7 @@ func TestGCIncorrectRange(t *testing.T) { } // Make sure the key no longer exists on range 2. 
- if res, pErr := client.SendWrappedWith(context.Background(), repl2, ts1Header, &getReq); pErr != nil { + if res, pErr := kv.SendWrappedWith(context.Background(), repl2, ts1Header, &getReq); pErr != nil { t.Errorf("unexpected pError on get request to correct range: %s", pErr) } else if resVal := res.(*roachpb.GetResponse).Value; resVal != nil { t.Errorf("expected value at key %s to no longer exist after GC to correct range, found value %v", key, resVal) @@ -7926,13 +7926,13 @@ func TestReplicaRefreshMultiple(t *testing.T) { // without going below 0). for i := 0; i < 3; i++ { inc := incrementArgs(key, 1) - if _, pErr := client.SendWrapped(ctx, tc.Sender(), inc); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Sender(), inc); pErr != nil { t.Fatal(pErr) } } // Sanity check the resulting value. get := getArgs(key) - if resp, pErr := client.SendWrapped(ctx, tc.Sender(), &get); pErr != nil { + if resp, pErr := kv.SendWrapped(ctx, tc.Sender(), &get); pErr != nil { t.Fatal(pErr) } else if x, err := resp.(*roachpb.GetResponse).Value.GetInt(); err != nil { t.Fatalf("returned non-int: %+v", err) @@ -8016,7 +8016,7 @@ func TestReplicaRefreshMultiple(t *testing.T) { // even in the buggy case, since illegal lease index proposals do // not generate reevaluations (and increment is handled upstream of // raft). - if resp, pErr := client.SendWrapped(ctx, tc.Sender(), &get); pErr != nil { + if resp, pErr := kv.SendWrapped(ctx, tc.Sender(), &get); pErr != nil { t.Fatal(pErr) } else if x, err := resp.(*roachpb.GetResponse).Value.GetInt(); err != nil { t.Fatalf("returned non-int: %+v", err) @@ -8698,7 +8698,7 @@ func TestCancelPendingCommands(t *testing.T) { errChan := make(chan *roachpb.Error, 1) go func() { incArgs := incrementArgs(roachpb.Key("a"), 1) - _, pErr := client.SendWrapped(ctx, tc.Sender(), incArgs) + _, pErr := kv.SendWrapped(ctx, tc.Sender(), incArgs) errChan <- pErr }() @@ -10203,7 +10203,7 @@ func TestRangeStatsRequest(t *testing.T) { // returns the same MVCC stats as the replica's in-memory state. 
WriteRandomDataToRange(t, tc.store, tc.repl.RangeID, keyPrefix) expMS := tc.repl.GetMVCCStats() - res, pErr := client.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{ + res, pErr := kv.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{ RangeID: tc.repl.RangeID, }, &roachpb.RangeStatsRequest{}) if pErr != nil { @@ -10218,7 +10218,7 @@ func TestRangeStatsRequest(t *testing.T) { if err := tc.store.DB().Put(ctx, key, "123"); err != nil { t.Fatal(err) } - res, pErr = client.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{ + res, pErr = kv.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{ RangeID: tc.repl.RangeID, }, &roachpb.RangeStatsRequest{}) if pErr != nil { @@ -10256,7 +10256,7 @@ func TestTxnRecordLifecycleTransitions(t *testing.T) { type runFunc func(*roachpb.Transaction, hlc.Timestamp) error sendWrappedWithErr := func(h roachpb.Header, args roachpb.Request) error { - _, pErr := client.SendWrappedWith(ctx, tc.Sender(), h, args) + _, pErr := kv.SendWrappedWith(ctx, tc.Sender(), h, args) return pErr.GoError() } @@ -11725,7 +11725,7 @@ func TestRollbackMissingTxnRecordNoError(t *testing.T) { key := roachpb.Key("bogus key") txn := newTransaction("test", key, roachpb.NormalUserPriority, tc.Clock()) - res, pErr := client.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{ + res, pErr := kv.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{ RangeID: tc.repl.RangeID, Txn: txn, }, &roachpb.EndTxnRequest{ @@ -11743,7 +11743,7 @@ func TestRollbackMissingTxnRecordNoError(t *testing.T) { // For good measure, let's take the opportunity to check replay protection for // a HeartbeatTxn arriving after the rollback. - _, pErr = client.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{ + _, pErr = kv.SendWrappedWith(ctx, tc.Sender(), roachpb.Header{ RangeID: tc.repl.RangeID, Txn: txn, }, &roachpb.HeartbeatTxnRequest{ diff --git a/pkg/kv/kvserver/replicate_test.go b/pkg/kv/kvserver/replicate_test.go index fcd156b08124..10affcb6294b 100644 --- a/pkg/kv/kvserver/replicate_test.go +++ b/pkg/kv/kvserver/replicate_test.go @@ -14,7 +14,7 @@ import ( "context" "testing" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -46,7 +46,7 @@ func TestEagerReplication(t *testing.T) { key := roachpb.Key("a") args := adminSplitArgs(key) - _, pErr := client.SendWrapped(ctx, store.TestSender(), args) + _, pErr := kv.SendWrapped(ctx, store.TestSender(), args) if pErr != nil { t.Fatal(pErr) } diff --git a/pkg/kv/kvserver/reports/constraint_stats_report.go b/pkg/kv/kvserver/reports/constraint_stats_report.go index 12f269822310..9782c1be6c5f 100644 --- a/pkg/kv/kvserver/reports/constraint_stats_report.go +++ b/pkg/kv/kvserver/reports/constraint_stats_report.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" @@ -177,7 +177,7 @@ func (r *replicationConstraintStatsReportSaver) ensureEntries( } func (r *replicationConstraintStatsReportSaver) loadPreviousVersion( - ctx context.Context, ex sqlutil.InternalExecutor, txn *client.Txn, + ctx context.Context, ex 
sqlutil.InternalExecutor, txn *kv.Txn, ) error { // The data for the previous save needs to be loaded if: // - this is the first time that we call this method and lastUpdatedAt has never been set @@ -223,7 +223,7 @@ func (r *replicationConstraintStatsReportSaver) updatePreviousVersion() { } func (r *replicationConstraintStatsReportSaver) updateTimestamp( - ctx context.Context, ex sqlutil.InternalExecutor, txn *client.Txn, reportTS time.Time, + ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, reportTS time.Time, ) error { if !r.lastGenerated.IsZero() && reportTS == r.lastGenerated { return errors.Errorf( @@ -248,10 +248,10 @@ func (r *replicationConstraintStatsReportSaver) updateTimestamp( // // reportTS is the time that will be set in the updated_at column for every row. func (r *replicationConstraintStatsReportSaver) Save( - ctx context.Context, reportTS time.Time, db *client.DB, ex sqlutil.InternalExecutor, + ctx context.Context, reportTS time.Time, db *kv.DB, ex sqlutil.InternalExecutor, ) error { r.lastUpdatedRowCount = 0 - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { err := r.loadPreviousVersion(ctx, ex, txn) if err != nil { return err @@ -308,10 +308,10 @@ func (r *replicationConstraintStatsReportSaver) Save( func (r *replicationConstraintStatsReportSaver) upsertConstraintStatus( ctx context.Context, reportTS time.Time, - txn *client.Txn, + txn *kv.Txn, key ConstraintStatusKey, violationCount int, - db *client.DB, + db *kv.DB, ex sqlutil.InternalExecutor, ) error { var err error diff --git a/pkg/kv/kvserver/reports/critical_localities_report.go b/pkg/kv/kvserver/reports/critical_localities_report.go index 87aa2fd023b1..d0284f5e2413 100644 --- a/pkg/kv/kvserver/reports/critical_localities_report.go +++ b/pkg/kv/kvserver/reports/critical_localities_report.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" @@ -90,7 +90,7 @@ func (r *replicationCriticalLocalitiesReportSaver) AddCriticalLocality( } func (r *replicationCriticalLocalitiesReportSaver) loadPreviousVersion( - ctx context.Context, ex sqlutil.InternalExecutor, txn *client.Txn, + ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, ) error { // The data for the previous save needs to be loaded if: // - this is the first time that we call this method and lastUpdatedAt has never been set @@ -135,7 +135,7 @@ func (r *replicationCriticalLocalitiesReportSaver) updatePreviousVersion() { } func (r *replicationCriticalLocalitiesReportSaver) updateTimestamp( - ctx context.Context, ex sqlutil.InternalExecutor, txn *client.Txn, reportTS time.Time, + ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, reportTS time.Time, ) error { if !r.lastGenerated.IsZero() && reportTS == r.lastGenerated { return errors.Errorf( @@ -160,10 +160,10 @@ func (r *replicationCriticalLocalitiesReportSaver) updateTimestamp( // // reportTS is the time that will be set in the updated_at column for every row. 
func (r *replicationCriticalLocalitiesReportSaver) Save( - ctx context.Context, reportTS time.Time, db *client.DB, ex sqlutil.InternalExecutor, + ctx context.Context, reportTS time.Time, db *kv.DB, ex sqlutil.InternalExecutor, ) error { r.lastUpdatedRowCount = 0 - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { err := r.loadPreviousVersion(ctx, ex, txn) if err != nil { return err @@ -219,10 +219,10 @@ func (r *replicationCriticalLocalitiesReportSaver) Save( func (r *replicationCriticalLocalitiesReportSaver) upsertLocality( ctx context.Context, reportTS time.Time, - txn *client.Txn, + txn *kv.Txn, key localityKey, status localityStatus, - db *client.DB, + db *kv.DB, ex sqlutil.InternalExecutor, ) error { var err error diff --git a/pkg/kv/kvserver/reports/replication_stats_report.go b/pkg/kv/kvserver/reports/replication_stats_report.go index 4225d9b44f1b..8b455a8b8024 100644 --- a/pkg/kv/kvserver/reports/replication_stats_report.go +++ b/pkg/kv/kvserver/reports/replication_stats_report.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" @@ -93,7 +93,7 @@ func (r *replicationStatsReportSaver) AddZoneRangeStatus( } func (r *replicationStatsReportSaver) loadPreviousVersion( - ctx context.Context, ex sqlutil.InternalExecutor, txn *client.Txn, + ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, ) error { // The data for the previous save needs to be loaded if: // - this is the first time that we call this method and lastUpdatedAt has never been set @@ -143,7 +143,7 @@ func (r *replicationStatsReportSaver) updatePreviousVersion() { } func (r *replicationStatsReportSaver) updateTimestamp( - ctx context.Context, ex sqlutil.InternalExecutor, txn *client.Txn, reportTS time.Time, + ctx context.Context, ex sqlutil.InternalExecutor, txn *kv.Txn, reportTS time.Time, ) error { if !r.lastGenerated.IsZero() && reportTS == r.lastGenerated { return errors.Errorf( @@ -168,10 +168,10 @@ func (r *replicationStatsReportSaver) updateTimestamp( // // reportTS is the time that will be set in the updated_at column for every row. 
func (r *replicationStatsReportSaver) Save( - ctx context.Context, reportTS time.Time, db *client.DB, ex sqlutil.InternalExecutor, + ctx context.Context, reportTS time.Time, db *kv.DB, ex sqlutil.InternalExecutor, ) error { r.lastUpdatedRowCount = 0 - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { err := r.loadPreviousVersion(ctx, ex, txn) if err != nil { return err @@ -226,10 +226,10 @@ func (r *replicationStatsReportSaver) Save( func (r *replicationStatsReportSaver) upsertStats( ctx context.Context, reportTS time.Time, - txn *client.Txn, + txn *kv.Txn, key ZoneKey, stats zoneRangeStatus, - db *client.DB, + db *kv.DB, ex sqlutil.InternalExecutor, ) error { var err error diff --git a/pkg/kv/kvserver/reports/reporter.go b/pkg/kv/kvserver/reports/reporter.go index 3d1d93992fca..535befbb6f23 100644 --- a/pkg/kv/kvserver/reports/reporter.go +++ b/pkg/kv/kvserver/reports/reporter.go @@ -19,8 +19,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -59,7 +59,7 @@ type Reporter struct { // Latest zone config latestConfig *config.SystemConfig - db *client.DB + db *kv.DB liveness *kvserver.NodeLiveness settings *cluster.Settings storePool *kvserver.StorePool @@ -74,7 +74,7 @@ type Reporter struct { // NewReporter creates a Reporter. func NewReporter( - db *client.DB, + db *kv.DB, localStores *kvserver.Stores, storePool *kvserver.StorePool, st *cluster.Settings, @@ -699,14 +699,14 @@ type RangeIterator interface { // meta2RangeIter is an implementation of RangeIterator that scans meta2 in a // paginated way. type meta2RangeIter struct { - db *client.DB + db *kv.DB // The size of the batches that descriptors will be read in. 0 for no limit. batchSize int - txn *client.Txn + txn *kv.Txn // buffer contains descriptors read in the first batch, but not yet returned // to the client. - buffer []client.KeyValue + buffer []kv.KeyValue // resumeSpan maintains the point where the meta2 scan stopped. resumeSpan *roachpb.Span // readingDone is set once we've scanned all of meta2. buffer may still @@ -714,7 +714,7 @@ type meta2RangeIter struct { readingDone bool } -func makeMeta2RangeIter(db *client.DB, batchSize int) meta2RangeIter { +func makeMeta2RangeIter(db *kv.DB, batchSize int) meta2RangeIter { return meta2RangeIter{db: db, batchSize: batchSize} } @@ -840,7 +840,7 @@ type reportID int // getReportGenerationTime returns the time at a particular report was last // generated. Returns time.Time{} if the report is not found. 
func getReportGenerationTime( - ctx context.Context, rid reportID, ex sqlutil.InternalExecutor, txn *client.Txn, + ctx context.Context, rid reportID, ex sqlutil.InternalExecutor, txn *kv.Txn, ) (time.Time, error) { row, err := ex.QueryRowEx( ctx, diff --git a/pkg/kv/kvserver/single_key_test.go b/pkg/kv/kvserver/single_key_test.go index a9a83efe2b6c..f88302eb0ca3 100644 --- a/pkg/kv/kvserver/single_key_test.go +++ b/pkg/kv/kvserver/single_key_test.go @@ -17,7 +17,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -68,7 +68,7 @@ func TestSingleKey(t *testing.T) { var r result for timeutil.Now().Before(deadline) { start := timeutil.Now() - err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { minExp := atomic.LoadInt64(&expected) r, err := txn.Get(ctx, key) if err != nil { diff --git a/pkg/kv/kvserver/split_queue.go b/pkg/kv/kvserver/split_queue.go index ec98c18087d4..bbf1d6a34fcd 100644 --- a/pkg/kv/kvserver/split_queue.go +++ b/pkg/kv/kvserver/split_queue.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" @@ -49,7 +49,7 @@ const ( // or along intersecting zone config boundaries. type splitQueue struct { *baseQueue - db *client.DB + db *kv.DB purgChan <-chan time.Time // loadBasedCount counts the load-based splits performed by the queue. @@ -57,7 +57,7 @@ type splitQueue struct { } // newSplitQueue returns a new instance of splitQueue. -func newSplitQueue(store *Store, db *client.DB, gossip *gossip.Gossip) *splitQueue { +func newSplitQueue(store *Store, db *kv.DB, gossip *gossip.Gossip) *splitQueue { var purgChan <-chan time.Time if c := store.TestingKnobs().SplitQueuePurgatoryChan; c != nil { purgChan = c diff --git a/pkg/kv/kvserver/storagebase/bulk_adder.go b/pkg/kv/kvserver/storagebase/bulk_adder.go index f94ec2e0acbc..78ef4a434a3d 100644 --- a/pkg/kv/kvserver/storagebase/bulk_adder.go +++ b/pkg/kv/kvserver/storagebase/bulk_adder.go @@ -14,7 +14,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" ) @@ -61,7 +61,7 @@ type BulkAdderOptions struct { // BulkAdderFactory describes a factory function for BulkAdders. type BulkAdderFactory func( - ctx context.Context, db *client.DB, timestamp hlc.Timestamp, opts BulkAdderOptions, + ctx context.Context, db *kv.DB, timestamp hlc.Timestamp, opts BulkAdderOptions, ) (BulkAdder, error) // BulkAdder describes a bulk-adding helper that can be used to add lots of KVs. 
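Before the store.go hunks, a minimal sketch of the transactional closure signature this patch threads through every caller (the report savers and single_key_test.go above): (*kv.DB).Txn runs the closure, retrying it on retryable errors, and hands it a *kv.Txn instead of the old *client.Txn. The function name, key, and read-then-write body are illustrative placeholders; Txn.Get and Txn.Put are assumed to behave as in the pre-rename client package, which the hunks above suggest.

package example // illustrative sketch, not part of the diff

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// readThenWrite is a hypothetical caller showing the post-rename closure
// signature: db.Txn retries the closure on retryable errors, passing a *kv.Txn.
func readThenWrite(ctx context.Context, db *kv.DB, key roachpb.Key) error {
	return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		kvPair, err := txn.Get(ctx, key)
		if err != nil {
			return err
		}
		// Write back whatever was read (nil bytes if the key is absent).
		return txn.Put(ctx, key, kvPair.ValueBytes())
	})
}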
diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index bacaaa687d45..a8e41f1f4a6f 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -30,8 +30,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvbase" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/closedts/container" @@ -365,7 +365,7 @@ func (rs *storeReplicaVisitor) EstimatedCount() int { type Store struct { Ident *roachpb.StoreIdent // pointer to catch access before Start() is called cfg StoreConfig - db *client.DB + db *kv.DB engine storage.Engine // The underlying key-value store compactor *compactor.Compactor // Schedules compaction of the engine tsCache tscache.Cache // Most recent timestamps for keys / key ranges @@ -591,7 +591,7 @@ type Store struct { computeInitialMetrics sync.Once } -var _ client.Sender = &Store{} +var _ kv.Sender = &Store{} // A StoreConfig encompasses the auxiliary objects and configuration // required to create a store. @@ -605,7 +605,7 @@ type StoreConfig struct { DefaultSystemZoneConfig *zonepb.ZoneConfig Settings *cluster.Settings Clock *hlc.Clock - DB *client.DB + DB *kv.DB Gossip *gossip.Gossip NodeLiveness *NodeLiveness StorePool *StorePool @@ -2047,7 +2047,7 @@ func (s *Store) Clock() *hlc.Clock { return s.cfg.Clock } func (s *Store) Engine() storage.Engine { return s.engine } // DB accessor. -func (s *Store) DB() *client.DB { return s.cfg.DB } +func (s *Store) DB() *kv.DB { return s.cfg.DB } // Gossip accessor. 
func (s *Store) Gossip() *gossip.Gossip { return s.cfg.Gossip } diff --git a/pkg/kv/kvserver/store_test.go b/pkg/kv/kvserver/store_test.go index 52fc77d78062..54a359f3d301 100644 --- a/pkg/kv/kvserver/store_test.go +++ b/pkg/kv/kvserver/store_test.go @@ -28,8 +28,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/rditer" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/stateloader" @@ -67,8 +67,8 @@ var testIdent = roachpb.StoreIdent{ StoreID: 1, } -func (s *Store) TestSender() client.Sender { - return client.Wrap(s, func(ba roachpb.BatchRequest) roachpb.BatchRequest { +func (s *Store) TestSender() kv.Sender { + return kv.Wrap(s, func(ba roachpb.BatchRequest) roachpb.BatchRequest { if ba.RangeID != 0 { return ba } @@ -109,8 +109,8 @@ type testSenderFactory struct { func (f *testSenderFactory) RootTransactionalSender( txn *roachpb.Transaction, _ roachpb.UserPriority, -) client.TxnSender { - return client.NewMockTransactionalSender( +) kv.TxnSender { + return kv.NewMockTransactionalSender( func( ctx context.Context, _ *roachpb.Transaction, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { @@ -119,10 +119,8 @@ func (f *testSenderFactory) RootTransactionalSender( txn) } -func (f *testSenderFactory) LeafTransactionalSender( - tis *roachpb.LeafTxnInputState, -) client.TxnSender { - return client.NewMockTransactionalSender( +func (f *testSenderFactory) LeafTransactionalSender(tis *roachpb.LeafTxnInputState) kv.TxnSender { + return kv.NewMockTransactionalSender( func( ctx context.Context, _ *roachpb.Transaction, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { @@ -131,7 +129,7 @@ func (f *testSenderFactory) LeafTransactionalSender( &tis.Txn) } -func (f *testSenderFactory) NonTransactionalSender() client.Sender { +func (f *testSenderFactory) NonTransactionalSender() kv.Sender { if f.nonTxnSender != nil { return f.nonTxnSender } @@ -230,7 +228,7 @@ func createTestStoreWithoutStart( stopper.AddCloser(eng) cfg.Transport = NewDummyRaftTransport(cfg.Settings) factory := &testSenderFactory{} - cfg.DB = client.NewDB(cfg.AmbientCtx, factory, cfg.Clock) + cfg.DB = kv.NewDB(cfg.AmbientCtx, factory, cfg.Clock) store := NewStore(context.TODO(), *cfg, eng, &roachpb.NodeDescriptor{NodeID: 1}) factory.setStore(store) if err := InitEngine( @@ -430,7 +428,7 @@ func TestStoreInitAndBootstrap(t *testing.T) { stopper.AddCloser(eng) cfg.Transport = NewDummyRaftTransport(cfg.Settings) factory := &testSenderFactory{} - cfg.DB = client.NewDB(cfg.AmbientCtx, factory, cfg.Clock) + cfg.DB = kv.NewDB(cfg.AmbientCtx, factory, cfg.Clock) { store := NewStore(ctx, cfg, eng, &roachpb.NodeDescriptor{NodeID: 1}) // Can't start as haven't bootstrapped. @@ -1009,11 +1007,11 @@ func TestStoreSend(t *testing.T) { gArgs := getArgs([]byte("a")) // Try a successful get request. 
- if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), &gArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &gArgs); pErr != nil { t.Fatal(pErr) } pArgs := putArgs([]byte("a"), []byte("aaa")) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil { t.Fatal(pErr) } } @@ -1093,7 +1091,7 @@ func TestStoreObservedTimestamp(t *testing.T) { Replica: desc, } assignSeqNumsForReqs(txn, &pArgs) - pReply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, &pArgs) + pReply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &pArgs) test.check(manual.UnixNano(), pReply, pErr) }() } @@ -1180,29 +1178,29 @@ func TestStoreVerifyKeys(t *testing.T) { store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper) // Try a start key == KeyMax. gArgs := getArgs(roachpb.KeyMax) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), &gArgs); !testutils.IsPError(pErr, "must be less than KeyMax") { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &gArgs); !testutils.IsPError(pErr, "must be less than KeyMax") { t.Fatalf("expected error for start key == KeyMax: %v", pErr) } // Try a get with an end key specified (get requires only a start key and should fail). gArgs.EndKey = roachpb.KeyMax - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), &gArgs); !testutils.IsPError(pErr, "must be less than KeyMax") { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &gArgs); !testutils.IsPError(pErr, "must be less than KeyMax") { t.Fatalf("unexpected error for end key specified on a non-range-based operation: %v", pErr) } // Try a scan with end key < start key. sArgs := scanArgs([]byte("b"), []byte("a")) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), sArgs); !testutils.IsPError(pErr, "must be greater than") { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), sArgs); !testutils.IsPError(pErr, "must be greater than") { t.Fatalf("unexpected error for end key < start: %v", pErr) } // Try a scan with start key == end key. sArgs.Key = []byte("a") sArgs.EndKey = sArgs.Key - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), sArgs); !testutils.IsPError(pErr, "must be greater than") { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), sArgs); !testutils.IsPError(pErr, "must be greater than") { t.Fatalf("unexpected error for start == end key: %v", pErr) } // Try a scan with range-local start key, but "regular" end key. sArgs.Key = keys.MakeRangeKey([]byte("test"), []byte("sffx"), nil) sArgs.EndKey = []byte("z") - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), sArgs); !testutils.IsPError(pErr, "range-local") { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), sArgs); !testutils.IsPError(pErr, "range-local") { t.Fatalf("unexpected error for local start, non-local end key: %v", pErr) } @@ -1210,7 +1208,7 @@ func TestStoreVerifyKeys(t *testing.T) { // length, but is accepted because of the meta prefix. 
meta2KeyMax := testutils.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax) pArgs := putArgs(meta2KeyMax, []byte("value")) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil { t.Fatalf("unexpected error on put to meta2 value: %s", pErr) } // Try to put a range descriptor record for a start key which is @@ -1218,14 +1216,14 @@ func TestStoreVerifyKeys(t *testing.T) { key := append([]byte{}, roachpb.RKeyMax...) key[len(key)-1] = 0x01 pArgs = putArgs(keys.RangeDescriptorKey(key), []byte("value")) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil { t.Fatalf("unexpected error on put to range descriptor for KeyMax value: %s", pErr) } // Try a put to txn record for a meta2 key (note that this doesn't // actually happen in practice, as txn records are not put directly, // but are instead manipulated only through txn methods). pArgs = putArgs(keys.TransactionKey(meta2KeyMax, uuid.MakeV4()), []byte("value")) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &pArgs); pErr != nil { t.Fatalf("unexpected error on put to txn meta2 value: %s", pErr) } } @@ -1238,7 +1236,7 @@ func TestStoreSendUpdateTime(t *testing.T) { store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper) args := getArgs([]byte("a")) reqTS := store.cfg.Clock.Now().Add(store.cfg.Clock.MaxOffset().Nanoseconds(), 0) - _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Timestamp: reqTS}, &args) + _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Timestamp: reqTS}, &args) if pErr != nil { t.Fatal(pErr) } @@ -1282,7 +1280,7 @@ func TestStoreSendWithClockOffset(t *testing.T) { args := getArgs([]byte("a")) // Set args timestamp to exceed max offset. reqTS := store.cfg.Clock.Now().Add(store.cfg.Clock.MaxOffset().Nanoseconds()+1, 0) - _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Timestamp: reqTS}, &args) + _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Timestamp: reqTS}, &args) if !testutils.IsPError(pErr, "remote wall time is too far ahead") { t.Errorf("unexpected error: %v", pErr) } @@ -1295,7 +1293,7 @@ func TestStoreSendBadRange(t *testing.T) { defer stopper.Stop(context.TODO()) store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper) args := getArgs([]byte("0")) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ RangeID: 2, // no such range }, &args); pErr == nil { t.Error("expected invalid range") @@ -1348,7 +1346,7 @@ func TestStoreSendOutOfRange(t *testing.T) { // Range 1 is from KeyMin to "b", so reading "b" from range 1 should // fail because it's just after the range boundary. 
args := getArgs([]byte("b")) - if _, err := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if _, err := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ RangeID: 1, }, &args); err == nil { t.Error("expected key to be out of range") @@ -1357,7 +1355,7 @@ func TestStoreSendOutOfRange(t *testing.T) { // Range 2 is from "b" to KeyMax, so reading "a" from range 2 should // fail because it's before the start of the range. args = getArgs([]byte("a")) - if _, err := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if _, err := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ RangeID: repl2.RangeID, }, &args); err == nil { t.Error("expected key to be out of range") @@ -1534,7 +1532,7 @@ func TestStoreResolveWriteIntent(t *testing.T) { pArgs := putArgs(key, []byte("value")) h := roachpb.Header{Txn: pushee} assignSeqNumsForReqs(pushee, &pArgs) - if _, err := client.SendWrappedWith(context.Background(), store.TestSender(), h, &pArgs); err != nil { + if _, err := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &pArgs); err != nil { t.Fatal(err) } @@ -1543,7 +1541,7 @@ func TestStoreResolveWriteIntent(t *testing.T) { h.Txn = pusher resultCh := make(chan *roachpb.Error, 1) go func() { - _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, &pArgs) + _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &pArgs) resultCh <- pErr }() @@ -1568,7 +1566,7 @@ func TestStoreResolveWriteIntent(t *testing.T) { // Send an end transaction to allow the original push to complete. etArgs, h := endTxnArgs(pushee, true) assignSeqNumsForReqs(pushee, &etArgs) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil { t.Fatal(pErr) } if pErr := <-resultCh; pErr != nil { @@ -1597,7 +1595,7 @@ func TestStoreResolveWriteIntentRollback(t *testing.T) { args := incrementArgs(key, 1) h := roachpb.Header{Txn: pushee} assignSeqNumsForReqs(pushee, args) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, args); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, args); pErr != nil { t.Fatal(pErr) } @@ -1605,7 +1603,7 @@ func TestStoreResolveWriteIntentRollback(t *testing.T) { h.Txn = pusher args.Increment = 2 assignSeqNumsForReqs(pusher, args) - if resp, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, args); pErr != nil { + if resp, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, args); pErr != nil { t.Errorf("expected increment to succeed: %s", pErr) } else if reply := resp.(*roachpb.IncrementResponse); reply.NewValue != 2 { t.Errorf("expected rollback of earlier increment to yield increment value of 2; got %d", reply.NewValue) @@ -1719,7 +1717,7 @@ func TestStoreResolveWriteIntentPushOnRead(t *testing.T) { // First, write original value. 
{ args := putArgs(key, []byte("value1")) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), &args); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), &args); pErr != nil { t.Fatal(pErr) } } @@ -1729,7 +1727,7 @@ func TestStoreResolveWriteIntentPushOnRead(t *testing.T) { args := putArgs(key, []byte("value2")) assignSeqNumsForReqs(pushee, &args) h := roachpb.Header{Txn: pushee} - if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), h, &args); pErr != nil { + if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), h, &args); pErr != nil { t.Fatal(pErr) } } @@ -1747,7 +1745,7 @@ func TestStoreResolveWriteIntentPushOnRead(t *testing.T) { pushee.WriteTimestamp.Forward(pushedTs) pushee.ReadTimestamp.Forward(pushedTs) hb, hbH := heartbeatArgs(pushee, store.cfg.Clock.Now()) - if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), hbH, &hb); pErr != nil { + if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), hbH, &hb); pErr != nil { t.Fatal(pErr) } } @@ -1756,7 +1754,7 @@ func TestStoreResolveWriteIntentPushOnRead(t *testing.T) { if tc.pusheeStagingRecord { et, etH := endTxnArgs(pushee, true) et.InFlightWrites = []roachpb.SequencedWrite{{Key: []byte("keyA"), Sequence: 1}} - etReply, pErr := client.SendWrappedWith(ctx, store.TestSender(), etH, &et) + etReply, pErr := kv.SendWrappedWith(ctx, store.TestSender(), etH, &et) if pErr != nil { t.Fatal(pErr) } @@ -1770,7 +1768,7 @@ func TestStoreResolveWriteIntentPushOnRead(t *testing.T) { pusher.WriteTimestamp.Forward(readTs) gArgs := getArgs(key) assignSeqNumsForReqs(pusher, &gArgs) - repl, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: pusher}, &gArgs) + repl, pErr := kv.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{Txn: pusher}, &gArgs) if tc.expPushError == "" { if pErr != nil { t.Errorf("expected read to succeed: %s", pErr) @@ -1789,7 +1787,7 @@ func TestStoreResolveWriteIntentPushOnRead(t *testing.T) { // the commit succeeds or fails. etArgs, etH := endTxnArgs(pushee, true) assignSeqNumsForReqs(pushee, &etArgs) - _, pErr = client.SendWrappedWith(ctx, store.TestSender(), etH, &etArgs) + _, pErr = kv.SendWrappedWith(ctx, store.TestSender(), etH, &etArgs) if tc.expPusheeRetry { if _, ok := pErr.GetDetail().(*roachpb.TransactionRetryError); !ok { t.Errorf("expected transaction retry error; got %s", pErr) @@ -1816,14 +1814,14 @@ func TestStoreResolveWriteIntentNoTxn(t *testing.T) { // First, write the pushee's txn via HeartbeatTxn request. hb, hbH := heartbeatArgs(pushee, pushee.WriteTimestamp) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), hbH, &hb); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), hbH, &hb); pErr != nil { t.Fatal(pErr) } // Next, lay down intent from pushee. 
args := putArgs(key, []byte("value1")) assignSeqNumsForReqs(pushee, &args) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), hbH, &args); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), hbH, &args); pErr != nil { t.Fatal(pErr) } @@ -1831,7 +1829,7 @@ func TestStoreResolveWriteIntentNoTxn(t *testing.T) { getTS := store.cfg.Clock.Now() // accessed later { gArgs := getArgs(key) - if reply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ Timestamp: getTS, UserPriority: roachpb.MaxUserPriority, }, &gArgs); pErr != nil { @@ -1845,7 +1843,7 @@ func TestStoreResolveWriteIntentNoTxn(t *testing.T) { // Next, try to write outside of a transaction. We will succeed in pushing txn. putTS := store.cfg.Clock.Now() args.Value.SetBytes([]byte("value2")) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ Timestamp: putTS, UserPriority: roachpb.MaxUserPriority, }, &args); pErr != nil { @@ -1882,7 +1880,7 @@ func TestStoreResolveWriteIntentNoTxn(t *testing.T) { // been aborted. etArgs, h := endTxnArgs(pushee, true) assignSeqNumsForReqs(pushee, &etArgs) - _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs) + _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs) if pErr == nil { t.Errorf("unexpected success committing transaction") } @@ -1920,7 +1918,7 @@ func TestStoreReadInconsistent(t *testing.T) { // First, write keyA. args := putArgs(keyA, []byte("value1")) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), &args); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), &args); pErr != nil { t.Fatal(pErr) } @@ -1937,14 +1935,14 @@ func TestStoreReadInconsistent(t *testing.T) { for _, txn := range []*roachpb.Transaction{txnA, txnB} { args.Key = txn.Key assignSeqNumsForReqs(txn, &args) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn}, &args); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn}, &args); pErr != nil { t.Fatal(pErr) } } // End txn B, but without resolving the intent. etArgs, h := endTxnArgs(txnB, true) assignSeqNumsForReqs(txnB, &etArgs) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil { t.Fatal(pErr) } @@ -1952,7 +1950,7 @@ func TestStoreReadInconsistent(t *testing.T) { // will be able to read with both INCONSISTENT and READ_UNCOMMITTED. // With READ_UNCOMMITTED, we'll also be able to see the intent's value. 
gArgs := getArgs(keyA) - if reply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ ReadConsistency: rc, }, &gArgs); pErr != nil { t.Errorf("expected read to succeed: %s", pErr) @@ -1977,7 +1975,7 @@ func TestStoreReadInconsistent(t *testing.T) { } gArgs.Key = keyB - if reply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ ReadConsistency: rc, }, &gArgs); pErr != nil { t.Errorf("expected read to succeed: %s", pErr) @@ -2002,7 +2000,7 @@ func TestStoreReadInconsistent(t *testing.T) { // However, it will be read eventually, as B's intent can be // resolved asynchronously as txn B is committed. testutils.SucceedsSoon(t, func() error { - if reply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ ReadConsistency: rc, }, &gArgs); pErr != nil { return errors.Errorf("expected read to succeed: %s", pErr) @@ -2018,7 +2016,7 @@ func TestStoreReadInconsistent(t *testing.T) { // Scan keys and verify results. sArgs := scanArgs(keyA, keyB.Next()) - reply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ ReadConsistency: rc, }, sArgs) if pErr != nil { @@ -2057,7 +2055,7 @@ func TestStoreReadInconsistent(t *testing.T) { // Reverse scan keys and verify results. rsArgs := revScanArgs(keyA, keyB.Next()) - reply, pErr = client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + reply, pErr = kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ ReadConsistency: rc, }, rsArgs) if pErr != nil { @@ -2115,7 +2113,7 @@ func TestStoreScanResumeTSCache(t *testing.T) { for _, keyStr := range []string{"a", "b", "c"} { key := roachpb.Key(keyStr) putArgs := putArgs(key, []byte("value")) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, &putArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &putArgs); pErr != nil { t.Fatal(pErr) } } @@ -2127,7 +2125,7 @@ func TestStoreScanResumeTSCache(t *testing.T) { manualClock.Set(t1.Nanoseconds()) h.Timestamp = makeTS(t1.Nanoseconds(), 0) h.MaxSpanRequestKeys = 2 - reply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, sArgs) + reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, sArgs) if pErr != nil { t.Fatal(pErr) } @@ -2155,7 +2153,7 @@ func TestStoreScanResumeTSCache(t *testing.T) { manualClock.Set(t2.Nanoseconds()) h.Timestamp = makeTS(t2.Nanoseconds(), 0) rsArgs := revScanArgs(span.Key, span.EndKey) - reply, pErr = client.SendWrappedWith(context.Background(), store.TestSender(), h, rsArgs) + reply, pErr = kv.SendWrappedWith(context.Background(), store.TestSender(), h, rsArgs) if pErr != nil { t.Fatal(pErr) } @@ -2238,7 +2236,7 @@ func TestStoreScanIntents(t *testing.T) { } args := putArgs(key, []byte(fmt.Sprintf("value%02d", j))) assignSeqNumsForReqs(txn, &args) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn}, &args); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), 
store.TestSender(), roachpb.Header{Txn: txn}, &args); pErr != nil { t.Fatal(pErr) } } @@ -2254,7 +2252,7 @@ func TestStoreScanIntents(t *testing.T) { } errChan := make(chan *roachpb.Error, 1) go func() { - reply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ Timestamp: ts, ReadConsistency: consistency, }, sArgs) @@ -2289,7 +2287,7 @@ func TestStoreScanIntents(t *testing.T) { etArgs.LockSpans = append(etArgs.LockSpans, roachpb.Span{Key: key}) } assignSeqNumsForReqs(txn, &etArgs) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil { t.Fatal(pErr) } <-errChan @@ -2331,7 +2329,7 @@ func TestStoreScanInconsistentResolvesIntents(t *testing.T) { keys = append(keys, key) args := putArgs(key, []byte(fmt.Sprintf("value%02d", j))) assignSeqNumsForReqs(txn, &args) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn}, &args); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn}, &args); pErr != nil { t.Fatal(pErr) } } @@ -2341,7 +2339,7 @@ func TestStoreScanInconsistentResolvesIntents(t *testing.T) { // attempts to resolve the intents would fail. etArgs, h := endTxnArgs(txn, true) assignSeqNumsForReqs(txn, &etArgs) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), h, &etArgs); pErr != nil { t.Fatal(pErr) } @@ -2350,7 +2348,7 @@ func TestStoreScanInconsistentResolvesIntents(t *testing.T) { // Scan the range repeatedly until we've verified count. sArgs := scanArgs(keys[0], keys[9].Next()) testutils.SucceedsSoon(t, func() error { - if reply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ + if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{ ReadConsistency: roachpb.INCONSISTENT, }, sArgs); pErr != nil { return pErr.GoError() @@ -2377,7 +2375,7 @@ func TestStoreScanIntentsFromTwoTxns(t *testing.T) { txn1 := newTransaction("test1", key1, 1, store.cfg.Clock) args := putArgs(key1, []byte("value1")) assignSeqNumsForReqs(txn1, &args) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn1}, &args); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn1}, &args); pErr != nil { t.Fatal(pErr) } @@ -2385,7 +2383,7 @@ func TestStoreScanIntentsFromTwoTxns(t *testing.T) { txn2 := newTransaction("test2", key2, 1, store.cfg.Clock) args = putArgs(key2, []byte("value2")) assignSeqNumsForReqs(txn2, &args) - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn2}, &args); pErr != nil { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{Txn: txn2}, &args); pErr != nil { t.Fatal(pErr) } @@ -2397,7 +2395,7 @@ func TestStoreScanIntentsFromTwoTxns(t *testing.T) { // Scan the range and verify empty result (expired txn is aborted, // cleaning up intents). 
sArgs := scanArgs(key1, key2.Next()) - if reply, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{}, sArgs); pErr != nil { + if reply, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{}, sArgs); pErr != nil { t.Fatal(pErr) } else if sReply := reply.(*roachpb.ScanResponse); len(sReply.Rows) != 0 { t.Errorf("expected empty result; got %+v", sReply.Rows) @@ -2448,7 +2446,7 @@ func TestStoreScanMultipleIntents(t *testing.T) { // Query the range with a single scan, which should cause all intents // to be resolved. sArgs := scanArgs(key1, key10.Next()) - if _, pErr := client.SendWrapped(context.Background(), store.TestSender(), sArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), store.TestSender(), sArgs); pErr != nil { t.Fatal(pErr) } @@ -2523,7 +2521,7 @@ func TestStoreBadRequests(t *testing.T) { if test.header.Txn != nil { assignSeqNumsForReqs(test.header.Txn, test.args) } - if _, pErr := client.SendWrappedWith(context.Background(), store.TestSender(), *test.header, test.args); !testutils.IsPError(pErr, test.err) { + if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), *test.header, test.args); !testutils.IsPError(pErr, test.err) { t.Errorf("%d expected error %q, got error %v", i, test.err, pErr) } }) diff --git a/pkg/kv/kvserver/stores.go b/pkg/kv/kvserver/stores.go index 0057de4be580..4ce53361ce5e 100644 --- a/pkg/kv/kvserver/stores.go +++ b/pkg/kv/kvserver/stores.go @@ -17,8 +17,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -52,7 +52,7 @@ type Stores struct { } } -var _ client.Sender = &Stores{} // Stores implements the client.Sender interface +var _ kv.Sender = &Stores{} // Stores implements the kv.Sender interface var _ gossip.Storage = &Stores{} // Stores implements the gossip.Storage interface // NewStores returns a local-only sender which directly accesses diff --git a/pkg/kv/kvserver/ts_maintenance_queue.go b/pkg/kv/kvserver/ts_maintenance_queue.go index 02d9b192aba5..0af9f20b4d33 100644 --- a/pkg/kv/kvserver/ts_maintenance_queue.go +++ b/pkg/kv/kvserver/ts_maintenance_queue.go @@ -16,7 +16,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -45,7 +45,7 @@ type TimeSeriesDataStore interface { storage.Reader, roachpb.RKey, roachpb.RKey, - *client.DB, + *kv.DB, *mon.BytesMonitor, int64, hlc.Timestamp, @@ -81,14 +81,14 @@ type timeSeriesMaintenanceQueue struct { *baseQueue tsData TimeSeriesDataStore replicaCountFn func() int - db *client.DB + db *kv.DB mem mon.BytesMonitor } // newTimeSeriesMaintenanceQueue returns a new instance of // timeSeriesMaintenanceQueue.
func newTimeSeriesMaintenanceQueue( - store *Store, db *client.DB, g *gossip.Gossip, tsData TimeSeriesDataStore, + store *Store, db *kv.DB, g *gossip.Gossip, tsData TimeSeriesDataStore, ) *timeSeriesMaintenanceQueue { q := &timeSeriesMaintenanceQueue{ tsData: tsData, diff --git a/pkg/kv/kvserver/ts_maintenance_queue_test.go b/pkg/kv/kvserver/ts_maintenance_queue_test.go index 008f5c1d9b72..de6dda586c14 100644 --- a/pkg/kv/kvserver/ts_maintenance_queue_test.go +++ b/pkg/kv/kvserver/ts_maintenance_queue_test.go @@ -20,7 +20,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" @@ -65,7 +65,7 @@ func (m *modelTimeSeriesDataStore) MaintainTimeSeries( ctx context.Context, snapshot storage.Reader, start, end roachpb.RKey, - db *client.DB, + db *kv.DB, _ *mon.BytesMonitor, _ int64, now hlc.Timestamp, @@ -119,7 +119,7 @@ func TestTimeSeriesMaintenanceQueue(t *testing.T) { for _, k := range splitKeys { repl := store.LookupReplica(roachpb.RKey(k)) args := adminSplitArgs(k) - if _, pErr := client.SendWrappedWith(ctx, store, roachpb.Header{ + if _, pErr := kv.SendWrappedWith(ctx, store, roachpb.Header{ RangeID: repl.RangeID, }, args); pErr != nil { t.Fatal(pErr) diff --git a/pkg/kv/kvserver/txn_recovery_integration_test.go b/pkg/kv/kvserver/txn_recovery_integration_test.go index 8f3770d1afe5..c48020f214c6 100644 --- a/pkg/kv/kvserver/txn_recovery_integration_test.go +++ b/pkg/kv/kvserver/txn_recovery_integration_test.go @@ -15,7 +15,7 @@ import ( "context" "testing" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/txnwait" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -47,7 +47,7 @@ func TestTxnRecoveryFromStaging(t *testing.T) { pArgs := putArgs(keyA, keyAVal) pArgs.Sequence = 1 h := roachpb.Header{Txn: txn} - if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), h, &pArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), h, &pArgs); pErr != nil { t.Fatal(pErr) } @@ -56,14 +56,14 @@ func TestTxnRecoveryFromStaging(t *testing.T) { // at its desired timestamp. This prevents an implicit commit state. 
if !commit { gArgs := getArgs(keyB) - if _, pErr := client.SendWrapped(ctx, store.TestSender(), &gArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, store.TestSender(), &gArgs); pErr != nil { t.Fatal(pErr) } } pArgs = putArgs(keyB, []byte("value2")) pArgs.Sequence = 2 - if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), h, &pArgs); pErr != nil { + if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), h, &pArgs); pErr != nil { t.Fatal(pErr) } @@ -74,7 +74,7 @@ func TestTxnRecoveryFromStaging(t *testing.T) { {Key: keyA, Sequence: 1}, {Key: keyB, Sequence: 2}, } - etReply, pErr := client.SendWrappedWith(ctx, store.TestSender(), etH, &et) + etReply, pErr := kv.SendWrappedWith(ctx, store.TestSender(), etH, &et) if pErr != nil { t.Fatal(pErr) } @@ -90,7 +90,7 @@ func TestTxnRecoveryFromStaging(t *testing.T) { manual.Increment(txnwait.TxnLivenessThreshold.Nanoseconds() + 1) gArgs := getArgs(keyA) - gReply, pErr := client.SendWrapped(ctx, store.TestSender(), &gArgs) + gReply, pErr := kv.SendWrapped(ctx, store.TestSender(), &gArgs) if pErr != nil { t.Fatal(pErr) } @@ -110,7 +110,7 @@ func TestTxnRecoveryFromStaging(t *testing.T) { // Query the transaction and verify that it has the right status. qtArgs := queryTxnArgs(txn.TxnMeta, false /* waitForUpdate */) - qtReply, pErr := client.SendWrapped(ctx, store.TestSender(), &qtArgs) + qtReply, pErr := kv.SendWrapped(ctx, store.TestSender(), &qtArgs) if pErr != nil { t.Fatal(pErr) } diff --git a/pkg/kv/kvserver/txnrecovery/manager.go b/pkg/kv/kvserver/txnrecovery/manager.go index fa4aff42e387..cb62275d3d95 100644 --- a/pkg/kv/kvserver/txnrecovery/manager.go +++ b/pkg/kv/kvserver/txnrecovery/manager.go @@ -14,7 +14,7 @@ import ( "context" "sort" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -60,7 +60,7 @@ type manager struct { log.AmbientContext clock *hlc.Clock - db *client.DB + db *kv.DB stopper *stop.Stopper metrics Metrics txns singleflight.Group @@ -68,9 +68,7 @@ type manager struct { } // NewManager returns an implementation of a transaction recovery Manager. -func NewManager( - ac log.AmbientContext, clock *hlc.Clock, db *client.DB, stopper *stop.Stopper, -) Manager { +func NewManager(ac log.AmbientContext, clock *hlc.Clock, db *kv.DB, stopper *stop.Stopper) Manager { ac.AddLogTag("txn-recovery", nil) return &manager{ AmbientContext: ac, @@ -237,7 +235,7 @@ func (m *manager) resolveIndeterminateCommitForTxnProbe( // Loop until either the transaction is observed to change, an in-flight // write is prevented, or we run out of in-flight writes to query. 
for len(queryIntentReqs) > 0 { - var b client.Batch + var b kv.Batch b.Header.Timestamp = m.clock.Now() b.AddRawRequest(&queryTxnReq) for i := 0; i < defaultBatchSize && len(queryIntentReqs) > 0; i++ { @@ -290,7 +288,7 @@ func (m *manager) resolveIndeterminateCommitForTxnProbe( func (m *manager) resolveIndeterminateCommitForTxnRecover( ctx context.Context, txn *roachpb.Transaction, preventedIntent bool, ) (*roachpb.Transaction, error) { - var b client.Batch + var b kv.Batch b.Header.Timestamp = m.clock.Now() b.AddRawRequest(&roachpb.RecoverTxnRequest{ RequestHeader: roachpb.RequestHeader{ diff --git a/pkg/kv/kvserver/txnrecovery/manager_test.go b/pkg/kv/kvserver/txnrecovery/manager_test.go index 30decab39161..b01fa9386bf1 100644 --- a/pkg/kv/kvserver/txnrecovery/manager_test.go +++ b/pkg/kv/kvserver/txnrecovery/manager_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -26,11 +26,11 @@ import ( "github.com/stretchr/testify/assert" ) -func makeManager(s *client.Sender) (Manager, *hlc.Clock, *stop.Stopper) { +func makeManager(s *kv.Sender) (Manager, *hlc.Clock, *stop.Stopper) { ac := log.AmbientContext{Tracer: tracing.NewTracer()} clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) stopper := stop.NewStopper() - db := client.NewDB(ac, client.NonTransactionalFactoryFunc(func( + db := kv.NewDB(ac, kv.NonTransactionalFactoryFunc(func( ctx context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return (*s).Send(ctx, ba) @@ -81,7 +81,7 @@ func TestResolveIndeterminateCommit(t *testing.T) { defer leaktest.AfterTest(t)() testutils.RunTrueAndFalse(t, "prevent", func(t *testing.T, prevent bool) { - var mockSender client.Sender + var mockSender kv.Sender m, clock, stopper := makeManager(&mockSender) defer stopper.Stop(context.Background()) @@ -91,7 +91,7 @@ func TestResolveIndeterminateCommit(t *testing.T) { {Key: roachpb.Key("b"), Sequence: 2}, } - mockSender = client.SenderFunc(func( + mockSender = kv.SenderFunc(func( _ context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Probing Phase. @@ -111,7 +111,7 @@ func TestResolveIndeterminateCommit(t *testing.T) { br.Responses[1].GetInner().(*roachpb.QueryIntentResponse).FoundIntent = true br.Responses[2].GetInner().(*roachpb.QueryIntentResponse).FoundIntent = !prevent - mockSender = client.SenderFunc(func( + mockSender = kv.SenderFunc(func( _ context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Recovery Phase. @@ -161,7 +161,7 @@ func TestResolveIndeterminateCommit(t *testing.T) { func TestResolveIndeterminateCommitTxnChanges(t *testing.T) { defer leaktest.AfterTest(t)() - var mockSender client.Sender + var mockSender kv.Sender m, clock, stopper := makeManager(&mockSender) defer stopper.Stop(context.Background()) @@ -268,7 +268,7 @@ func TestResolveIndeterminateCommitTxnChanges(t *testing.T) { } for _, c := range testCases { t.Run(c.name, func(t *testing.T) { - mockSender = client.SenderFunc(func( + mockSender = kv.SenderFunc(func( _ context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Probing Phase. 
@@ -292,7 +292,7 @@ func TestResolveIndeterminateCommitTxnChanges(t *testing.T) { br.Responses[1].GetInner().(*roachpb.QueryIntentResponse).FoundIntent = true br.Responses[2].GetInner().(*roachpb.QueryIntentResponse).FoundIntent = false - mockSender = client.SenderFunc(func( + mockSender = kv.SenderFunc(func( _ context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Recovery Phase. @@ -335,14 +335,14 @@ func TestResolveIndeterminateCommitTxnChanges(t *testing.T) { func TestResolveIndeterminateCommitTxnWithoutInFlightWrites(t *testing.T) { defer leaktest.AfterTest(t)() - var mockSender client.Sender + var mockSender kv.Sender m, clock, stopper := makeManager(&mockSender) defer stopper.Stop(context.Background()) // Create STAGING txn without any in-flight writes. txn := makeStagingTransaction(clock) - mockSender = client.SenderFunc(func( + mockSender = kv.SenderFunc(func( _ context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Recovery Phase. Probing phase skipped. diff --git a/pkg/kv/kvserver/txnwait/queue.go b/pkg/kv/kvserver/txnwait/queue.go index ba5ec7a6bf24..176be9b91810 100644 --- a/pkg/kv/kvserver/txnwait/queue.go +++ b/pkg/kv/kvserver/txnwait/queue.go @@ -17,7 +17,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" @@ -153,7 +153,7 @@ func (pt *pendingTxn) getDependentsSet() map[uuid.UUID]struct{} { // Config contains the dependencies to construct a Queue. type Config struct { RangeDesc *roachpb.RangeDescriptor - DB *client.DB + DB *kv.DB Clock *hlc.Clock Stopper *stop.Stopper Metrics *Metrics @@ -861,7 +861,7 @@ func (q *Queue) queryTxnStatus( dependents []uuid.UUID, now hlc.Timestamp, ) (*roachpb.Transaction, []uuid.UUID, *roachpb.Error) { - b := &client.Batch{} + b := &kv.Batch{} b.Header.Timestamp = q.cfg.Clock.Now() b.AddRawRequest(&roachpb.QueryTxnRequest{ RequestHeader: roachpb.RequestHeader{ @@ -917,7 +917,7 @@ func (q *Queue) forcePushAbort( forcePush := *req forcePush.Force = true forcePush.PushType = roachpb.PUSH_ABORT - b := &client.Batch{} + b := &kv.Batch{} b.Header.Timestamp = q.cfg.Clock.Now() b.AddRawRequest(&forcePush) if err := q.cfg.DB.Run(ctx, b); err != nil { diff --git a/pkg/kv/kvserver/txnwait/queue_test.go b/pkg/kv/kvserver/txnwait/queue_test.go index f8a85ed4b582..5c290e75bb6f 100644 --- a/pkg/kv/kvserver/txnwait/queue_test.go +++ b/pkg/kv/kvserver/txnwait/queue_test.go @@ -17,7 +17,7 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -164,7 +164,7 @@ func TestIsPushed(t *testing.T) { } } -func makeConfig(s client.SenderFunc) Config { +func makeConfig(s kv.SenderFunc) Config { var cfg Config cfg.RangeDesc = &roachpb.RangeDescriptor{ StartKey: roachpb.RKeyMin, EndKey: roachpb.RKeyMax, @@ -174,8 +174,8 @@ func makeConfig(s client.SenderFunc) Config { cfg.Stopper = stop.NewStopper() cfg.Metrics = NewMetrics(time.Minute) if s != nil { - factory := client.NonTransactionalFactoryFunc(s) - cfg.DB = client.NewDB(testutils.MakeAmbientCtx(), factory, cfg.Clock) + factory := 
kv.NonTransactionalFactoryFunc(s) + cfg.DB = kv.NewDB(testutils.MakeAmbientCtx(), factory, cfg.Clock) } return cfg } @@ -223,7 +223,7 @@ func TestMaybeWaitForQueryWithContextCancellation(t *testing.T) { // released. func TestPushersReleasedAfterAnyQueryTxnFindsAbortedTxn(t *testing.T) { defer leaktest.AfterTest(t)() - var mockSender client.SenderFunc + var mockSender kv.SenderFunc cfg := makeConfig(func( ctx context.Context, ba roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { diff --git a/pkg/kv/main_test.go b/pkg/kv/main_test.go index 117e740cb72d..e6286ddf7b2b 100644 --- a/pkg/kv/main_test.go +++ b/pkg/kv/main_test.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package client_test +package kv_test import ( "os" diff --git a/pkg/kv/mock_transactional_sender.go b/pkg/kv/mock_transactional_sender.go index d2f027c37647..85a22e89e0b4 100644 --- a/pkg/kv/mock_transactional_sender.go +++ b/pkg/kv/mock_transactional_sender.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package client +package kv import ( "context" diff --git a/pkg/kv/range_lookup.go b/pkg/kv/range_lookup.go index 7c87b2551664..1cdf6d5fa69a 100644 --- a/pkg/kv/range_lookup.go +++ b/pkg/kv/range_lookup.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package client +package kv import ( "context" diff --git a/pkg/kv/range_lookup_test.go b/pkg/kv/range_lookup_test.go index 0fa7988fd93c..9a4f606b9536 100644 --- a/pkg/kv/range_lookup_test.go +++ b/pkg/kv/range_lookup_test.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package client +package kv import ( "context" diff --git a/pkg/kv/sender.go b/pkg/kv/sender.go index ad356078824a..05b35ecc52bc 100644 --- a/pkg/kv/sender.go +++ b/pkg/kv/sender.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package client +package kv import ( "context" diff --git a/pkg/kv/txn.go b/pkg/kv/txn.go index 488092ee58be..5e73785f592b 100644 --- a/pkg/kv/txn.go +++ b/pkg/kv/txn.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package client +package kv import ( "context" diff --git a/pkg/kv/txn_test.go b/pkg/kv/txn_test.go index 4ea39c23ad7d..003ea4bffb38 100644 --- a/pkg/kv/txn_test.go +++ b/pkg/kv/txn_test.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -package client +package kv import ( "context" diff --git a/pkg/kv/util.go b/pkg/kv/util.go index b7cf20b62fa0..2bb6058a135a 100644 --- a/pkg/kv/util.go +++ b/pkg/kv/util.go @@ -8,7 +8,7 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. 
-package client +package kv import ( "fmt" diff --git a/pkg/server/admin.go b/pkg/server/admin.go index 86cb8502819d..d4f4d4f80384 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -26,9 +26,9 @@ import ( "github.com/cockroachdb/apd" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagepb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -1668,14 +1668,14 @@ func (s *adminServer) DecommissionStatus( // Compute the replica counts for the target nodes only. This map doubles as // a lookup table to check whether we care about a given node. var replicaCounts map[roachpb.NodeID]int64 - if err := s.server.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.server.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { const pageSize = 10000 replicaCounts = make(map[roachpb.NodeID]int64) for _, nodeID := range nodeIDs { replicaCounts[nodeID] = 0 } return txn.Iterate(ctx, keys.MetaMin, keys.MetaMax, pageSize, - func(rows []client.KeyValue) error { + func(rows []kv.KeyValue) error { rangeDesc := roachpb.RangeDescriptor{} for _, row := range rows { if err := row.ValueProto(&rangeDesc); err != nil { @@ -1835,7 +1835,7 @@ func (s *adminServer) DataDistribution( } // Get replica counts. - if err := s.server.db.Txn(ctx, func(txnCtx context.Context, txn *client.Txn) error { + if err := s.server.db.Txn(ctx, func(txnCtx context.Context, txn *kv.Txn) error { acct := s.memMonitor.MakeBoundAccount() defer acct.Close(txnCtx) diff --git a/pkg/server/intent_test.go b/pkg/server/intent_test.go index c5977dc6cd66..5417534a79da 100644 --- a/pkg/server/intent_test.go +++ b/pkg/server/intent_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -120,7 +120,7 @@ func TestIntentResolution(t *testing.T) { t.Fatal(err) } - if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() if tc.keys[0] >= string(splitKey) { t.Fatalf("first key %s must be < split key %s", tc.keys[0], splitKey) diff --git a/pkg/server/node.go b/pkg/server/node.go index d7928f1565f5..c6f92c2148e6 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -23,8 +23,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -164,8 +164,8 @@ type Node struct { // allocateNodeID increments the node id generator key to allocate // a new, unique node id. 
-func allocateNodeID(ctx context.Context, db *client.DB) (roachpb.NodeID, error) { - val, err := client.IncrementValRetryable(ctx, db, keys.NodeIDGenerator, 1) +func allocateNodeID(ctx context.Context, db *kv.DB) (roachpb.NodeID, error) { + val, err := kv.IncrementValRetryable(ctx, db, keys.NodeIDGenerator, 1) if err != nil { return 0, errors.Wrap(err, "unable to allocate node ID") } @@ -176,9 +176,9 @@ func allocateNodeID(ctx context.Context, db *client.DB) (roachpb.NodeID, error) // specified node to allocate count new, unique store ids. The // first ID in a contiguous range is returned on success. func allocateStoreIDs( - ctx context.Context, nodeID roachpb.NodeID, count int64, db *client.DB, + ctx context.Context, nodeID roachpb.NodeID, count int64, db *kv.DB, ) (roachpb.StoreID, error) { - val, err := client.IncrementValRetryable(ctx, db, keys.StoreIDGenerator, count) + val, err := kv.IncrementValRetryable(ctx, db, keys.StoreIDGenerator, count) if err != nil { return 0, errors.Wrapf(err, "unable to allocate %d store IDs for node %d", count, nodeID) } @@ -864,7 +864,7 @@ func (n *Node) recordJoinEvent() { retryOpts := base.DefaultRetryOptions() retryOpts.Closer = n.stopper.ShouldStop() for r := retry.Start(retryOpts); r.Next(); { - if err := n.storeCfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := n.storeCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return n.eventLogger.InsertEventRecord( ctx, txn, diff --git a/pkg/server/node_test.go b/pkg/server/node_test.go index 41e57b4e1905..79580f9b199e 100644 --- a/pkg/server/node_test.go +++ b/pkg/server/node_test.go @@ -26,8 +26,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/gossip/resolver" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -95,7 +95,7 @@ func createTestNode( }, distSender, ) - cfg.DB = client.NewDB(cfg.AmbientCtx, tsf, cfg.Clock) + cfg.DB = kv.NewDB(cfg.AmbientCtx, tsf, cfg.Clock) cfg.Transport = kvserver.NewDummyRaftTransport(st) active, renewal := cfg.NodeLivenessDurations() cfg.HistogramWindowInterval = metric.TestSampleInterval diff --git a/pkg/server/server.go b/pkg/server/server.go index f32eb59132c4..44dd04703d16 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -34,10 +34,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/blobs/blobspb" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobsprotectedts" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/bulk" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" @@ -178,7 +178,7 @@ type Server struct { storePool *kvserver.StorePool tcsFactory *kvcoord.TxnCoordSenderFactory distSender *kvcoord.DistSender - db *client.DB + db *kv.DB pgServer *pgwire.Server distSQLServer *distsql.ServerImpl node *Node @@ -355,10 +355,10 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { } s.tcsFactory = 
kvcoord.NewTxnCoordSenderFactory(txnCoordSenderFactoryCfg, s.distSender) - dbCtx := client.DefaultDBContext() + dbCtx := kv.DefaultDBContext() dbCtx.NodeID = &s.nodeIDContainer dbCtx.Stopper = s.stopper - s.db = client.NewDBWithContext(s.cfg.AmbientCtx, s.tcsFactory, s.clock, dbCtx) + s.db = kv.NewDBWithContext(s.cfg.AmbientCtx, s.tcsFactory, s.clock, dbCtx) nlActive, nlRenewal := s.cfg.NodeLivenessDurations() @@ -629,7 +629,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { RuntimeStats: s.runtime, DB: s.db, Executor: internalExecutor, - FlowDB: client.NewDB(s.cfg.AmbientCtx, s.tcsFactory, s.clock), + FlowDB: kv.NewDB(s.cfg.AmbientCtx, s.tcsFactory, s.clock), RPCContext: s.rpcContext, Stopper: s.stopper, NodeID: &s.nodeIDContainer, @@ -648,7 +648,7 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { ParentMemoryMonitor: &rootSQLMemoryMonitor, BulkAdder: func( - ctx context.Context, db *client.DB, ts hlc.Timestamp, opts storagebase.BulkAdderOptions, + ctx context.Context, db *kv.DB, ts hlc.Timestamp, opts storagebase.BulkAdderOptions, ) (storagebase.BulkAdder, error) { // Attach a child memory monitor to enable control over the BulkAdder's // memory usage. @@ -1756,7 +1756,7 @@ func (s *Server) Start(ctx context.Context) error { // Run startup migrations (note: these depend on jobs subsystem running). var bootstrapVersion roachpb.Version - if err := s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return txn.GetProto(ctx, keys.BootstrapVersionKey, &bootstrapVersion) }); err != nil { return err @@ -2177,7 +2177,7 @@ func (s *Server) Decommission(ctx context.Context, setTo bool, nodeIDs []roachpb // update, this would force a 2PC and potentially leave write intents in // the node liveness range. Better to make the event logging best effort // than to slow down future node liveness transactions. 
- if err := s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return eventLogger.InsertEventRecord( ctx, txn, eventType, int32(nodeID), int32(s.NodeID()), struct{}{}, ) diff --git a/pkg/server/server_systemlog_gc.go b/pkg/server/server_systemlog_gc.go index 5e3bed7cebdf..b4bdfd5f5b16 100644 --- a/pkg/server/server_systemlog_gc.go +++ b/pkg/server/server_systemlog_gc.go @@ -15,7 +15,7 @@ import ( "fmt" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -90,7 +90,7 @@ func (s *Server) gcSystemLog( for { var rowsAffected int64 - err := s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error row, err := s.internalExecutor.QueryRowEx( ctx, diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go index 5b1a92dc93a6..ca8fd144e8b5 100644 --- a/pkg/server/server_test.go +++ b/pkg/server/server_test.go @@ -30,8 +30,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/build" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/serverpb" @@ -172,7 +172,7 @@ func TestServerStartClock(t *testing.T) { get := &roachpb.GetRequest{ RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("a")}, } - if _, err := client.SendWrapped( + if _, err := kv.SendWrapped( context.Background(), s.DB().NonTransactionalSender(), get, ); err != nil { t.Fatal(err) @@ -350,17 +350,17 @@ func TestMultiRangeScanDeleteRange(t *testing.T) { RequestHeader: roachpb.RequestHeader{Key: writes[0]}, } get.EndKey = writes[len(writes)-1] - if _, err := client.SendWrapped(ctx, tds, get); err == nil { + if _, err := kv.SendWrapped(ctx, tds, get); err == nil { t.Errorf("able to call Get with a key range: %v", get) } var delTS hlc.Timestamp for i, k := range writes { put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k)) - if _, err := client.SendWrapped(ctx, tds, put); err != nil { + if _, err := kv.SendWrapped(ctx, tds, put); err != nil { t.Fatal(err) } scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next(), false) - reply, err := client.SendWrapped(ctx, tds, scan) + reply, err := kv.SendWrapped(ctx, tds, scan) if err != nil { t.Fatal(err) } @@ -382,7 +382,7 @@ func TestMultiRangeScanDeleteRange(t *testing.T) { }, ReturnKeys: true, } - reply, err := client.SendWrappedWith(ctx, tds, roachpb.Header{Timestamp: delTS}, del) + reply, err := kv.SendWrappedWith(ctx, tds, roachpb.Header{Timestamp: delTS}, del) if err != nil { t.Fatal(err) } @@ -396,7 +396,7 @@ func TestMultiRangeScanDeleteRange(t *testing.T) { now := s.Clock().Now() txnProto := roachpb.MakeTransaction("MyTxn", nil, 0, now, 0) - txn := client.NewTxnFromProto(ctx, db, s.NodeID(), now, client.RootTxn, &txnProto) + txn := kv.NewTxnFromProto(ctx, db, s.NodeID(), now, kv.RootTxn, &txnProto) scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next(), false) ba := roachpb.BatchRequest{} @@ -448,7 +448,7 @@ func TestMultiRangeScanWithPagination(t *testing.T) { 
for _, k := range tc.keys { put := roachpb.NewPut(k, roachpb.MakeValueFromBytes(k)) - if _, err := client.SendWrapped(ctx, tds, put); err != nil { + if _, err := kv.SendWrapped(ctx, tds, put); err != nil { t.Fatal(err) } } @@ -459,7 +459,7 @@ func TestMultiRangeScanWithPagination(t *testing.T) { var maxTargetBytes int64 { scan := roachpb.NewScan(tc.keys[0], tc.keys[len(tc.keys)-1].Next(), false) - resp, pErr := client.SendWrapped(ctx, tds, scan) + resp, pErr := kv.SendWrapped(ctx, tds, scan) require.Nil(t, pErr) maxTargetBytes = resp.Header().NumBytes } @@ -564,7 +564,7 @@ func TestSystemConfigGossip(t *testing.T) { } // Write a system key with the transaction marked as having a Gossip trigger. - if err := kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } diff --git a/pkg/server/status.go b/pkg/server/status.go index 24ad2091b32a..dfaccf763ecc 100644 --- a/pkg/server/status.go +++ b/pkg/server/status.go @@ -35,8 +35,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/build" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagepb" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -132,7 +132,7 @@ type statusServer struct { st *cluster.Settings cfg *base.Config admin *adminServer - db *client.DB + db *kv.DB gossip *gossip.Gossip metricSource metricMarshaler nodeLiveness *kvserver.NodeLiveness @@ -151,7 +151,7 @@ func newStatusServer( st *cluster.Settings, cfg *base.Config, adminServer *adminServer, - db *client.DB, + db *kv.DB, gossip *gossip.Gossip, metricSource metricMarshaler, nodeLiveness *kvserver.NodeLiveness, @@ -1027,7 +1027,7 @@ func (s *statusServer) Nodes( startKey := keys.StatusNodePrefix endKey := startKey.PrefixEnd() - b := &client.Batch{} + b := &kv.Batch{} b.Scan(startKey, endKey) if err := s.db.Run(ctx, b); err != nil { log.Error(ctx, err) @@ -1096,7 +1096,7 @@ func (s *statusServer) Node( } key := keys.NodeStatusKey(nodeID) - b := &client.Batch{} + b := &kv.Batch{} b.Get(key) if err := s.db.Run(ctx, b); err != nil { log.Error(ctx, err) diff --git a/pkg/server/status/recorder.go b/pkg/server/status/recorder.go index 93fea9305b22..eb4ee7230d15 100644 --- a/pkg/server/status/recorder.go +++ b/pkg/server/status/recorder.go @@ -25,8 +25,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/build" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -483,7 +483,7 @@ func (mr *MetricsRecorder) GenerateNodeStatus(ctx context.Context) *statuspb.Nod // WriteNodeStatus writes the supplied summary to the given client. 
func (mr *MetricsRecorder) WriteNodeStatus( - ctx context.Context, db *client.DB, nodeStatus statuspb.NodeStatus, + ctx context.Context, db *kv.DB, nodeStatus statuspb.NodeStatus, ) error { mr.writeSummaryMu.Lock() defer mr.writeSummaryMu.Unlock() diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index 650fb1c00cb8..df135b0a6670 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -26,8 +26,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/tscache" @@ -317,7 +317,7 @@ func (ts *TestServer) TsDB() *ts.DB { } // DB returns the client.DB instance used by the TestServer. -func (ts *TestServer) DB() *client.DB { +func (ts *TestServer) DB() *kv.DB { if ts != nil { return ts.db } @@ -395,7 +395,7 @@ func (ts *TestServer) ExpectedInitialRangeCount() (int, error) { // ExpectedInitialRangeCount returns the expected number of ranges that should // be on the server after bootstrap. func ExpectedInitialRangeCount( - db *client.DB, defaultZoneConfig *zonepb.ZoneConfig, defaultSystemZoneConfig *zonepb.ZoneConfig, + db *kv.DB, defaultZoneConfig *zonepb.ZoneConfig, defaultSystemZoneConfig *zonepb.ZoneConfig, ) (int, error) { descriptorIDs, err := sqlmigrations.ExpectedDescriptorIDs(context.Background(), db, defaultZoneConfig, defaultSystemZoneConfig) if err != nil { @@ -681,7 +681,7 @@ func (ts *TestServer) GetFirstStoreID() roachpb.StoreID { // LookupRange returns the descriptor of the range containing key. func (ts *TestServer) LookupRange(key roachpb.Key) (roachpb.RangeDescriptor, error) { - rs, _, err := client.RangeLookup(context.Background(), ts.DB().NonTransactionalSender(), + rs, _, err := kv.RangeLookup(context.Background(), ts.DB().NonTransactionalSender(), key, roachpb.CONSISTENT, 0 /* prefetchNum */, false /* reverse */) if err != nil { return roachpb.RangeDescriptor{}, errors.Errorf( @@ -699,7 +699,7 @@ func (ts *TestServer) MergeRanges(leftKey roachpb.Key) (roachpb.RangeDescriptor, Key: leftKey, }, } - _, pErr := client.SendWrapped(ctx, ts.DB().NonTransactionalSender(), &mergeReq) + _, pErr := kv.SendWrapped(ctx, ts.DB().NonTransactionalSender(), &mergeReq) if pErr != nil { return roachpb.RangeDescriptor{}, errors.Errorf( @@ -730,7 +730,7 @@ func (ts *TestServer) SplitRange( SplitKey: splitKey, ExpirationTime: hlc.MaxTimestamp, } - _, pErr := client.SendWrapped(ctx, ts.DB().NonTransactionalSender(), &splitReq) + _, pErr := kv.SendWrapped(ctx, ts.DB().NonTransactionalSender(), &splitReq) if pErr != nil { return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, errors.Errorf( @@ -750,9 +750,9 @@ func (ts *TestServer) SplitRange( // be retried. Instead, the message to wrap is stored in case of // non-retryable failures and then wrapped when the full transaction fails. var wrappedMsg string - if err := ts.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := ts.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { scanMeta := func(key roachpb.RKey, reverse bool) (desc roachpb.RangeDescriptor, err error) { - var kvs []client.KeyValue + var kvs []kv.KeyValue if reverse { // Find the last range that ends at or before key. 
kvs, err = txn.ReverseScan( @@ -814,7 +814,7 @@ func (ts *TestServer) GetRangeLease( Key: key, }, } - leaseResp, pErr := client.SendWrappedWith( + leaseResp, pErr := kv.SendWrappedWith( ctx, ts.DB().NonTransactionalSender(), roachpb.Header{ @@ -882,7 +882,7 @@ func (ts *TestServer) ForceTableGC( }, Threshold: timestamp, } - _, pErr := client.SendWrapped(ctx, ts.distSender, &gcr) + _, pErr := kv.SendWrapped(ctx, ts.distSender, &gcr) return pErr.GoError() } diff --git a/pkg/sql/as_of_test.go b/pkg/sql/as_of_test.go index 1e21e79bda2f..4b6cd8fadcbd 100644 --- a/pkg/sql/as_of_test.go +++ b/pkg/sql/as_of_test.go @@ -18,7 +18,7 @@ import ( "github.com/cockroachdb/apd" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" @@ -300,7 +300,7 @@ func TestAsOfRetry(t *testing.T) { switch req := args.Req.(type) { case *roachpb.ScanRequest: - if client.TestingIsRangeLookupRequest(req) { + if kv.TestingIsRangeLookupRequest(req) { return nil } for key, count := range magicVals.restartCounts { diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index c0df56451542..48662c60b2f6 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -17,9 +17,9 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" @@ -106,7 +106,7 @@ func (sc *SchemaChanger) getChunkSize(chunkSize int64) int64 { } // scTxnFn is the type of functions that operates using transactions in the backfiller. -type scTxnFn func(ctx context.Context, txn *client.Txn, evalCtx *extendedEvalContext) error +type scTxnFn func(ctx context.Context, txn *kv.Txn, evalCtx *extendedEvalContext) error // historicalTxnRunner is the type of the callback used by the various // helper functions to run checks at a fixed timestamp (logically, at @@ -116,7 +116,7 @@ type historicalTxnRunner func(ctx context.Context, fn scTxnFn) error // makeFixedTimestampRunner creates a historicalTxnRunner suitable for use by the helpers. func (sc *SchemaChanger) makeFixedTimestampRunner(readAsOf hlc.Timestamp) historicalTxnRunner { runner := func(ctx context.Context, retryable scTxnFn) error { - return sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *client.Txn) error { + return sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *kv.Txn) error { // We need to re-create the evalCtx since the txn may retry. 
evalCtx := createSchemaChangeEvalCtx(ctx, readAsOf, sc.ieFactory) return retryable(ctx, txn, &evalCtx) @@ -128,9 +128,9 @@ func (sc *SchemaChanger) makeFixedTimestampRunner(readAsOf hlc.Timestamp) histor func (sc *SchemaChanger) fixedTimestampTxn( ctx context.Context, readAsOf hlc.Timestamp, - retryable func(ctx context.Context, txn *client.Txn) error, + retryable func(ctx context.Context, txn *kv.Txn) error, ) error { - return sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { txn.SetFixedTimestamp(ctx, readAsOf) return retryable(ctx, txn) }) @@ -506,7 +506,7 @@ func (sc *SchemaChanger) validateConstraints( readAsOf := sc.clock.Now() var tableDesc *sqlbase.TableDescriptor - if err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *client.Txn) error { + if err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *kv.Txn) error { tableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) return err }); err != nil { @@ -542,7 +542,7 @@ func (sc *SchemaChanger) validateConstraints( return err } // Each check operates at the historical timestamp. - return runHistoricalTxn(ctx, func(ctx context.Context, txn *client.Txn, evalCtx *extendedEvalContext) error { + return runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, evalCtx *extendedEvalContext) error { switch c.ConstraintType { case sqlbase.ConstraintToUpdate_CHECK: if err := validateCheckInTxn(ctx, sc.leaseMgr, &evalCtx.EvalContext, desc, txn, c.Check.Name); err != nil { @@ -605,7 +605,7 @@ func (sc *SchemaChanger) validateConstraints( // It operates entirely on the current goroutine and is thus able to // reuse an existing client.Txn safely. func (sc *SchemaChanger) getTableVersion( - ctx context.Context, txn *client.Txn, tc *TableCollection, version sqlbase.DescriptorVersion, + ctx context.Context, txn *kv.Txn, tc *TableCollection, version sqlbase.DescriptorVersion, ) (*sqlbase.ImmutableTableDescriptor, error) { tableDesc, err := tc.getTableVersionByID(ctx, txn, sc.tableID, tree.ObjectLookupFlags{}) if err != nil { @@ -656,7 +656,7 @@ func (sc *SchemaChanger) truncateIndexes( } // Make a new txn just to drop this chunk. - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if fn := sc.execCfg.DistSQLRunTestingKnobs.RunBeforeBackfillChunk; fn != nil { if err := fn(resume); err != nil { return err @@ -707,7 +707,7 @@ func (sc *SchemaChanger) truncateIndexes( // All the data chunks have been removed. Now also removed the // zone configs for the dropped indexes, if any. - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return removeIndexZoneConfigs(ctx, txn, sc.execCfg, sc.tableID, dropped) }); err != nil { return err @@ -744,7 +744,7 @@ func getJobIDForMutationWithDescriptor( // It operates entirely on the current goroutine and is thus able to // reuse an existing client.Txn safely. func (sc *SchemaChanger) nRanges( - ctx context.Context, txn *client.Txn, spans []roachpb.Span, + ctx context.Context, txn *kv.Txn, spans []roachpb.Span, ) (int, error) { spanResolver := sc.distSQLPlanner.spanResolver.NewSpanResolverIterator(txn) rangeIds := make(map[int64]struct{}) @@ -837,8 +837,8 @@ func (sc *SchemaChanger) distBackfill( // cheap scan. 
if backfillType == indexBackfill { const pageSize = 10000 - noop := func(_ []client.KeyValue) error { return nil } - if err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *client.Txn) error { + noop := func(_ []kv.KeyValue) error { return nil } + if err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *kv.Txn) error { for _, span := range targetSpans { // TODO(dt): a Count() request would be nice here if the target isn't // empty, since we don't need to drag all the results back just to @@ -856,7 +856,7 @@ func (sc *SchemaChanger) distBackfill( // Gather the initial resume spans for the table. var todoSpans []roachpb.Span var mutationIdx int - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error todoSpans, _, mutationIdx, err = rowexec.GetResumeSpans( ctx, sc.jobRegistry, txn, sc.tableID, sc.mutationID, filter) @@ -867,7 +867,7 @@ func (sc *SchemaChanger) distBackfill( for len(todoSpans) > 0 { log.VEventf(ctx, 2, "backfill: process %+v spans", todoSpans) - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Report schema change progress. We define progress at this point // as the the fraction of fully-backfilled ranges of the primary index of // the table being scanned. Since we may have already modified the @@ -972,7 +972,7 @@ func (sc *SchemaChanger) distBackfill( // its done work by writing to the jobs table. // In this case we intersect todoSpans with what the old node(s) // have set in the jobs table not to overwrite their done work. - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error resumeSpans, _, _, err = rowexec.GetResumeSpans( ctx, sc.jobRegistry, txn, sc.tableID, sc.mutationID, filter) @@ -986,7 +986,7 @@ func (sc *SchemaChanger) distBackfill( } // Record what is left to do for the job. // TODO(spaskob): Execute this at a regular cadence. 
- if err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return rowexec.SetResumeSpansInJob(ctx, todoSpans, mutationIdx, txn, sc.job) }); err != nil { return err @@ -1006,7 +1006,7 @@ func (sc *SchemaChanger) updateJobRunningStatus( ctx context.Context, status jobs.RunningStatus, ) (*sqlbase.TableDescriptor, error) { var tableDesc *sqlbase.TableDescriptor - err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error tableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) if err != nil { @@ -1072,7 +1072,7 @@ func (sc *SchemaChanger) validateIndexes( readAsOf := sc.clock.Now() var tableDesc *sqlbase.TableDescriptor - if err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *client.Txn) error { + if err := sc.fixedTimestampTxn(ctx, readAsOf, func(ctx context.Context, txn *kv.Txn) error { tableDesc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) return err }); err != nil { @@ -1194,7 +1194,7 @@ func (sc *SchemaChanger) validateInvertedIndexes( var idxLen int64 key := tableDesc.IndexSpan(idx.ID).Key endKey := tableDesc.IndexSpan(idx.ID).EndKey - if err := runHistoricalTxn(ctx, func(ctx context.Context, txn *client.Txn, _ *extendedEvalContext) error { + if err := runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, _ *extendedEvalContext) error { for { kvs, err := txn.Scan(ctx, key, endKey, 1000000) if err != nil { @@ -1238,7 +1238,7 @@ func (sc *SchemaChanger) validateInvertedIndexes( } col := idx.ColumnNames[0] - if err := runHistoricalTxn(ctx, func(ctx context.Context, txn *client.Txn, evalCtx *extendedEvalContext) error { + if err := runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, evalCtx *extendedEvalContext) error { ie := evalCtx.InternalExecutor.(*InternalExecutor) row, err := ie.QueryRowEx(ctx, "verify-inverted-idx-count", txn, sqlbase.InternalExecutorSessionDataOverride{}, @@ -1309,7 +1309,7 @@ func (sc *SchemaChanger) validateForwardIndexes( // Retrieve the row count in the index. var idxLen int64 - if err := runHistoricalTxn(ctx, func(ctx context.Context, txn *client.Txn, evalCtx *extendedEvalContext) error { + if err := runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, evalCtx *extendedEvalContext) error { // TODO(vivek): This is not a great API. Leaving #34304 open. ie := evalCtx.InternalExecutor.(*InternalExecutor) ie.tcModifier = tc @@ -1358,7 +1358,7 @@ func (sc *SchemaChanger) validateForwardIndexes( start := timeutil.Now() // Count the number of rows in the table. 
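The validateInvertedIndexes hunk above pages through an index span with txn.Scan inside a historical transaction. A simplified sketch of that counting loop (helper name and the exact paging step are illustrative, not lines from this patch):

func countIndexEntries(ctx context.Context, txn *kv.Txn, key, endKey roachpb.Key) (int64, error) {
	var count int64
	for {
		// Fetch at most 1,000,000 entries per round trip, as in the hunk above.
		kvs, err := txn.Scan(ctx, key, endKey, 1000000)
		if err != nil {
			return 0, err
		}
		if len(kvs) == 0 {
			return count, nil
		}
		count += int64(len(kvs))
		// Resume the next page just after the last key returned.
		key = kvs[len(kvs)-1].Key.Next()
	}
}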
- if err := runHistoricalTxn(ctx, func(ctx context.Context, txn *client.Txn, evalCtx *extendedEvalContext) error { + if err := runHistoricalTxn(ctx, func(ctx context.Context, txn *kv.Txn, evalCtx *extendedEvalContext) error { ie := evalCtx.InternalExecutor.(*InternalExecutor) cnt, err := ie.QueryRowEx(ctx, "VERIFY INDEX", txn, sqlbase.InternalExecutorSessionDataOverride{}, @@ -1630,7 +1630,7 @@ func validateCheckInTxn( leaseMgr *LeaseManager, evalCtx *tree.EvalContext, tableDesc *MutableTableDescriptor, - txn *client.Txn, + txn *kv.Txn, checkName string, ) error { ie := evalCtx.InternalExecutor.(*InternalExecutor) @@ -1674,7 +1674,7 @@ func validateFkInTxn( leaseMgr *LeaseManager, evalCtx *tree.EvalContext, tableDesc *MutableTableDescriptor, - txn *client.Txn, + txn *kv.Txn, fkName string, ) error { ie := evalCtx.InternalExecutor.(*InternalExecutor) @@ -1716,7 +1716,7 @@ func validateFkInTxn( // reuse an existing client.Txn safely. func columnBackfillInTxn( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tc *TableCollection, evalCtx *tree.EvalContext, tableDesc *sqlbase.ImmutableTableDescriptor, @@ -1774,7 +1774,7 @@ func columnBackfillInTxn( // It operates entirely on the current goroutine and is thus able to // reuse an existing client.Txn safely. func indexBackfillInTxn( - ctx context.Context, txn *client.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, traceKV bool, + ctx context.Context, txn *kv.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, traceKV bool, ) error { var backfiller backfill.IndexBackfiller if err := backfiller.Init(tableDesc); err != nil { @@ -1797,7 +1797,7 @@ func indexBackfillInTxn( // reuse an existing client.Txn safely. func indexTruncateInTxn( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, execCfg *ExecutorConfig, tableDesc *sqlbase.ImmutableTableDescriptor, traceKV bool, diff --git a/pkg/sql/backfill/backfill.go b/pkg/sql/backfill/backfill.go index f94aef273509..6c745b2e6211 100644 --- a/pkg/sql/backfill/backfill.go +++ b/pkg/sql/backfill/backfill.go @@ -15,7 +15,7 @@ package backfill import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/transform" @@ -134,7 +134,7 @@ func (cb *ColumnBackfiller) Init( // the span sp provided, for all updateCols. func (cb *ColumnBackfiller) RunColumnBackfillChunk( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, otherTables []*sqlbase.ImmutableTableDescriptor, sp roachpb.Span, @@ -276,7 +276,7 @@ func (cb *ColumnBackfiller) RunColumnBackfillChunk( // ConvertBackfillError returns a cleaner SQL error for a failed Batch. func ConvertBackfillError( - ctx context.Context, tableDesc *sqlbase.ImmutableTableDescriptor, b *client.Batch, + ctx context.Context, tableDesc *sqlbase.ImmutableTableDescriptor, b *kv.Batch, ) error { // A backfill on a new schema element has failed and the batch contains // information useful in printing a sensible error. However @@ -377,7 +377,7 @@ func (ib *IndexBackfiller) Init(desc *sqlbase.ImmutableTableDescriptor) error { // provided, and builds all the added indexes. func (ib *IndexBackfiller) BuildIndexEntriesChunk( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, sp roachpb.Span, chunkSize int64, @@ -440,7 +440,7 @@ func (ib *IndexBackfiller) BuildIndexEntriesChunk( // indexes. 
func (ib *IndexBackfiller) RunIndexBackfillChunk( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, sp roachpb.Span, chunkSize int64, diff --git a/pkg/sql/check.go b/pkg/sql/check.go index 5777ac4a5b03..d932ac739ac3 100644 --- a/pkg/sql/check.go +++ b/pkg/sql/check.go @@ -16,7 +16,7 @@ import ( "fmt" "strings" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -37,7 +37,7 @@ func validateCheckExpr( exprStr string, tableDesc *sqlbase.TableDescriptor, ie *InternalExecutor, - txn *client.Txn, + txn *kv.Txn, ) error { expr, err := parser.ParseExpr(exprStr) if err != nil { @@ -236,7 +236,7 @@ func validateForeignKey( srcTable *sqlbase.TableDescriptor, fk *sqlbase.ForeignKeyConstraint, ie *InternalExecutor, - txn *client.Txn, + txn *kv.Txn, ) error { targetTable, err := sqlbase.GetTableDescFromID(ctx, txn, fk.ReferencedTableID) if err != nil { diff --git a/pkg/sql/colexec/cfetcher.go b/pkg/sql/colexec/cfetcher.go index 35686b0b912e..fc93d9dba194 100644 --- a/pkg/sql/colexec/cfetcher.go +++ b/pkg/sql/colexec/cfetcher.go @@ -19,8 +19,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coltypes" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/colencoding" "github.com/cockroachdb/cockroach/pkg/sql/colexec/execerror" @@ -446,7 +446,7 @@ func (rf *cFetcher) Init( // times. 
func (rf *cFetcher) StartScan( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, spans roachpb.Spans, limitBatches bool, limitHint int64, diff --git a/pkg/sql/colflow/colbatch_scan_test.go b/pkg/sql/colflow/colbatch_scan_test.go index 203a9d0b01f5..92e396567626 100644 --- a/pkg/sql/colflow/colbatch_scan_test.go +++ b/pkg/sql/colflow/colbatch_scan_test.go @@ -20,7 +20,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/colexec" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -70,7 +70,7 @@ func BenchmarkColBatchScan(b *testing.B) { flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), NodeID: s.NodeID(), } diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index eb533c3308ba..d8ca6342b490 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -21,7 +21,7 @@ import ( "unicode/utf8" "github.com/cockroachdb/cockroach/pkg/config" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" @@ -688,7 +688,7 @@ func (s *Server) newConnExecutorWithTxn( parentMon *mon.BytesMonitor, memMetrics MemoryMetrics, srvMetrics *Metrics, - txn *client.Txn, + txn *kv.Txn, tcModifier tableCollectionModifier, resetOpt sdResetOption, ) (*connExecutor, error) { @@ -1776,7 +1776,7 @@ func (ex *connExecutor) execCopyIn( } var cm copyMachineInterface var err error - resetPlanner := func(p *planner, txn *client.Txn, txnTS time.Time, stmtTS time.Time) { + resetPlanner := func(p *planner, txn *kv.Txn, txnTS time.Time, stmtTS time.Time) { // HACK: We're reaching inside ex.state and changing sqlTimestamp by hand. // It is used by resetPlanner. Normally sqlTimestamp is updated by the // state machine, but the copyMachine manages its own transactions without @@ -2022,9 +2022,7 @@ func (ex *connExecutor) initEvalCtx(ctx context.Context, evalCtx *extendedEvalCo // return for statements executed with this evalCtx. Since generally each // statement is supposed to have a different timestamp, the evalCtx generally // shouldn't be reused across statements. 
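Both the cFetcher and connExecutor hunks above move to the renamed root-transaction constructor. A trivial sketch of what callers now write (the wrapper itself is illustrative):

func newRootTxn(ctx context.Context, db *kv.DB, gatewayNodeID roachpb.NodeID) *kv.Txn {
	// A root txn heartbeats its own record; leaf txns derived from it do not.
	return kv.NewTxn(ctx, db, gatewayNodeID)
}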
-func (ex *connExecutor) resetEvalCtx( - evalCtx *extendedEvalContext, txn *client.Txn, stmtTS time.Time, -) { +func (ex *connExecutor) resetEvalCtx(evalCtx *extendedEvalContext, txn *kv.Txn, stmtTS time.Time) { evalCtx.TxnState = ex.getTransactionState() evalCtx.TxnReadOnly = ex.state.readOnly evalCtx.TxnImplicit = ex.implicitTxn() @@ -2073,11 +2071,7 @@ func (ex *connExecutor) initPlanner(ctx context.Context, p *planner) { } func (ex *connExecutor) resetPlanner( - ctx context.Context, - p *planner, - txn *client.Txn, - stmtTS time.Time, - numAnnotations tree.AnnotationIdx, + ctx context.Context, p *planner, txn *kv.Txn, stmtTS time.Time, numAnnotations tree.AnnotationIdx, ) { p.txn = txn p.stmt = nil diff --git a/pkg/sql/conn_executor_exec.go b/pkg/sql/conn_executor_exec.go index 785d49438cea..cfa58ebb9393 100644 --- a/pkg/sql/conn_executor_exec.go +++ b/pkg/sql/conn_executor_exec.go @@ -18,7 +18,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/parser" @@ -427,7 +427,7 @@ func (ex *connExecutor) execStmtInOpenState( // that all uses of SQL execution initialize the client.Txn using a // single/common function. That would be where the stepping mode // gets enabled once for all SQL statements executed "underneath". - prevSteppingMode := ex.state.mu.txn.ConfigureStepping(ctx, client.SteppingEnabled) + prevSteppingMode := ex.state.mu.txn.ConfigureStepping(ctx, kv.SteppingEnabled) defer func() { _ = ex.state.mu.txn.ConfigureStepping(ctx, prevSteppingMode) }() // Then we create a sequencing point. @@ -614,7 +614,7 @@ func (ex *connExecutor) checkTableTwoVersionInvariant(ctx context.Context) error // Create a new transaction to retry with a higher timestamp than the // timestamps used in the retry loop above. - ex.state.mu.txn = client.NewTxnWithSteppingEnabled(ctx, ex.transitionCtx.db, ex.transitionCtx.nodeID) + ex.state.mu.txn = kv.NewTxnWithSteppingEnabled(ctx, ex.transitionCtx.db, ex.transitionCtx.nodeID) if err := ex.state.mu.txn.SetUserPriority(userPriority); err != nil { return err } diff --git a/pkg/sql/conn_executor_internal_test.go b/pkg/sql/conn_executor_internal_test.go index 4163aa996eba..a7fff75c78d5 100644 --- a/pkg/sql/conn_executor_internal_test.go +++ b/pkg/sql/conn_executor_internal_test.go @@ -16,7 +16,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/distsql" @@ -245,12 +245,12 @@ func startConnExecutor( // A lot of boilerplate for creating a connExecutor. 
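The conn_executor_exec hunk above only renames the stepping constant, but the surrounding save/enable/restore pattern is worth restating. A sketch assuming just ConfigureStepping and kv.SteppingEnabled from this diff (helper name illustrative):

func withStepping(ctx context.Context, txn *kv.Txn, fn func() error) error {
	// Enable stepping so the statement reads a snapshot that excludes its own
	// writes, then restore whatever mode the txn was in before.
	prev := txn.ConfigureStepping(ctx, kv.SteppingEnabled)
	defer func() { _ = txn.ConfigureStepping(ctx, prev) }()
	return fn()
}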
stopper := stop.NewStopper() clock := hlc.NewClock(hlc.UnixNano, 0 /* maxOffset */) - factory := client.MakeMockTxnSenderFactory( + factory := kv.MakeMockTxnSenderFactory( func(context.Context, *roachpb.Transaction, roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return nil, nil }) - db := client.NewDB(testutils.MakeAmbientCtx(), factory, clock) + db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) st := cluster.MakeTestingClusterSettings() nodeID := &base.NodeIDContainer{} nodeID.Set(ctx, 1) diff --git a/pkg/sql/conn_executor_prepare.go b/pkg/sql/conn_executor_prepare.go index 1e8a9614ac38..a3186855f13b 100644 --- a/pkg/sql/conn_executor_prepare.go +++ b/pkg/sql/conn_executor_prepare.go @@ -14,7 +14,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase" @@ -165,7 +165,7 @@ func (ex *connExecutor) prepare( // TODO(andrei): Needing a transaction for preparing seems non-sensical, as // the prepared statement outlives the txn. I hope that it's not used for // anything other than getting a timestamp. - txn := client.NewTxn(ctx, ex.server.cfg.DB, ex.server.cfg.NodeID.Get()) + txn := kv.NewTxn(ctx, ex.server.cfg.DB, ex.server.cfg.NodeID.Get()) ex.statsCollector.reset(&ex.server.sqlStats, ex.appStats, &ex.phaseTimes) p := &ex.planner @@ -191,7 +191,7 @@ func (ex *connExecutor) prepare( // populatePrepared analyzes and type-checks the query and populates // stmt.Prepared. func (ex *connExecutor) populatePrepared( - ctx context.Context, txn *client.Txn, placeholderHints tree.PlaceholderTypes, p *planner, + ctx context.Context, txn *kv.Txn, placeholderHints tree.PlaceholderTypes, p *planner, ) (planFlags, error) { stmt := p.stmt if err := p.semaCtx.Placeholders.Init(stmt.NumPlaceholders, placeholderHints); err != nil { diff --git a/pkg/sql/conn_executor_savepoints.go b/pkg/sql/conn_executor_savepoints.go index 3a9d23ca8db7..8f2eff3e29dd 100644 --- a/pkg/sql/conn_executor_savepoints.go +++ b/pkg/sql/conn_executor_savepoints.go @@ -14,7 +14,7 @@ import ( "context" "strings" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -260,7 +260,7 @@ type savepoint struct { // client.SavepointToken.Initial()). commitOnRelease bool - kvToken client.SavepointToken + kvToken kv.SavepointToken // The number of DDL statements that had been executed in the transaction (at // the time the savepoint was created). We refuse to roll back a savepoint if diff --git a/pkg/sql/copy.go b/pkg/sql/copy.go index 7f869c4974d9..00895ec5ce3a 100644 --- a/pkg/sql/copy.go +++ b/pkg/sql/copy.go @@ -18,7 +18,7 @@ import ( "time" "unsafe" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase" @@ -71,7 +71,7 @@ type copyMachine struct { // resetPlanner is a function to be used to prepare the planner for inserting // data. 
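The startConnExecutor hunk above shows the renamed test scaffolding; pulled out as a standalone sketch (the function name is illustrative, the calls are taken verbatim from the hunk):

func newMockDB() *kv.DB {
	clock := hlc.NewClock(hlc.UnixNano, 0 /* maxOffset */)
	factory := kv.MakeMockTxnSenderFactory(
		func(context.Context, *roachpb.Transaction, roachpb.BatchRequest,
		) (*roachpb.BatchResponse, *roachpb.Error) {
			return nil, nil // every batch "succeeds" with an empty response
		})
	return kv.NewDB(testutils.MakeAmbientCtx(), factory, clock)
}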
- resetPlanner func(p *planner, txn *client.Txn, txnTS time.Time, stmtTS time.Time) + resetPlanner func(p *planner, txn *kv.Txn, txnTS time.Time, stmtTS time.Time) // execInsertPlan is a function to be used to execute the plan (stored in the // planner) which performs an INSERT. @@ -98,7 +98,7 @@ func newCopyMachine( n *tree.CopyFrom, txnOpt copyTxnOpt, execCfg *ExecutorConfig, - resetPlanner func(p *planner, txn *client.Txn, txnTS time.Time, stmtTS time.Time), + resetPlanner func(p *planner, txn *kv.Txn, txnTS time.Time, stmtTS time.Time), execInsertPlan func(ctx context.Context, p *planner, res RestrictedCommandResult) error, ) (_ *copyMachine, retErr error) { c := &copyMachine{ @@ -151,7 +151,7 @@ type copyTxnOpt struct { // performed. Committing the txn is left to the higher layer. If not set, the // machine will split writes between multiple transactions that it will // initiate. - txn *client.Txn + txn *kv.Txn txnTimestamp time.Time stmtTimestamp time.Time } @@ -292,7 +292,7 @@ func (c *copyMachine) preparePlanner(ctx context.Context) func(context.Context, stmtTs := c.txnOpt.stmtTimestamp autoCommit := false if txn == nil { - txn = client.NewTxnWithSteppingEnabled(ctx, c.p.execCfg.DB, c.p.execCfg.NodeID.Get()) + txn = kv.NewTxnWithSteppingEnabled(ctx, c.p.execCfg.DB, c.p.execCfg.NodeID.Get()) txnTs = c.p.execCfg.Clock.PhysicalTime() stmtTs = txnTs autoCommit = true diff --git a/pkg/sql/copy_file_upload.go b/pkg/sql/copy_file_upload.go index 3589ec851236..2f553200ca06 100644 --- a/pkg/sql/copy_file_upload.go +++ b/pkg/sql/copy_file_upload.go @@ -18,7 +18,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/blobs" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/lex" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgwirebase" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -50,7 +50,7 @@ func newFileUploadMachine( conn pgwirebase.Conn, n *tree.CopyFrom, execCfg *ExecutorConfig, - resetPlanner func(p *planner, txn *client.Txn, txnTS time.Time, stmtTS time.Time), + resetPlanner func(p *planner, txn *kv.Txn, txnTS time.Time, stmtTS time.Time), ) (f *fileUploadMachine, retErr error) { if len(n.Columns) != 0 { return nil, errors.New("expected 0 columns specified for file uploads") diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index b5a2a5816872..086ad103486c 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -24,9 +24,9 @@ import ( "github.com/cockroachdb/cockroach/pkg/build" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagepb" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -2146,7 +2146,7 @@ func (p *planner) getAllNames(ctx context.Context) (map[sqlbase.ID]NamespaceKey, // TestingGetAllNames is a wrapper for getAllNames. func TestingGetAllNames( - ctx context.Context, txn *client.Txn, executor *InternalExecutor, + ctx context.Context, txn *kv.Txn, executor *InternalExecutor, ) (map[sqlbase.ID]NamespaceKey, error) { return getAllNames(ctx, txn, executor) } @@ -2154,7 +2154,7 @@ func TestingGetAllNames( // getAllNames is the testable implementation of getAllNames.
// It is public so that it can be tested outside the sql package. func getAllNames( - ctx context.Context, txn *client.Txn, executor *InternalExecutor, + ctx context.Context, txn *kv.Txn, executor *InternalExecutor, ) (map[sqlbase.ID]NamespaceKey, error) { namespace := map[sqlbase.ID]NamespaceKey{} rows, err := executor.Query( diff --git a/pkg/sql/crdb_internal_test.go b/pkg/sql/crdb_internal_test.go index 1aed70c22913..03bbee324f90 100644 --- a/pkg/sql/crdb_internal_test.go +++ b/pkg/sql/crdb_internal_test.go @@ -19,8 +19,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server/status/statuspb" @@ -50,7 +50,7 @@ func TestGetAllNamesInternal(t *testing.T) { s, _ /* sqlDB */, kvDB := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) - err := kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { batch := txn.NewBatch() batch.Put(sqlbase.NewTableKey(999, 444, "bob").Key(), 9999) batch.Put(sqlbase.NewDeprecatedTableKey(1000, "alice").Key(), 10000) @@ -211,7 +211,7 @@ CREATE TABLE t.test (k INT); tableDesc.Columns = append(tableDesc.Columns, *col) // Write the modified descriptor. - if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } diff --git a/pkg/sql/create_sequence.go b/pkg/sql/create_sequence.go index ec63ff724a84..fd7438d73b95 100644 --- a/pkg/sql/create_sequence.go +++ b/pkg/sql/create_sequence.go @@ -13,8 +13,8 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -119,7 +119,7 @@ func doCreateSequence( // Initialize the sequence value. 
seqValueKey := keys.MakeSequenceKey(uint32(id)) - b := &client.Batch{} + b := &kv.Batch{} b.Inc(seqValueKey, desc.SequenceOpts.Start-desc.SequenceOpts.Increment) if err := params.p.txn.Run(params.ctx, b); err != nil { return err diff --git a/pkg/sql/create_stats.go b/pkg/sql/create_stats.go index 962451dfcb6f..b7e4b5a3d3c2 100644 --- a/pkg/sql/create_stats.go +++ b/pkg/sql/create_stats.go @@ -14,9 +14,9 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -367,7 +367,7 @@ func (r *createStatsResumer) Resume( }() dsp := p.DistSQLPlanner() - if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := p.ExecCfg().DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if details.AsOf != nil { p.semaCtx.AsOfTimestamp = details.AsOf p.extendedEvalCtx.SetTxnTimestamp(details.AsOf.GoTime()) @@ -426,7 +426,7 @@ func (r *createStatsResumer) Resume( // to use the transaction that inserted the new stats into the // system.table_statistics table, but that would require calling // MakeEventLogger from the distsqlrun package. - return evalCtx.ExecCfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return evalCtx.ExecCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return MakeEventLogger(evalCtx.ExecCfg).InsertEventRecord( ctx, txn, diff --git a/pkg/sql/create_table.go b/pkg/sql/create_table.go index e97fa4a3dfc7..33735b080d7f 100644 --- a/pkg/sql/create_table.go +++ b/pkg/sql/create_table.go @@ -21,8 +21,8 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/parser" @@ -296,7 +296,7 @@ func (n *createTableNode) startExec(params runParams) error { err = func() error { // The data fill portion of CREATE AS must operate on a read snapshot, // so that it doesn't end up observing its own writes. - prevMode := params.p.Txn().ConfigureStepping(params.ctx, client.SteppingEnabled) + prevMode := params.p.Txn().ConfigureStepping(params.ctx, kv.SteppingEnabled) defer func() { _ = params.p.Txn().ConfigureStepping(params.ctx, prevMode) }() // This is a very simplified version of the INSERT logic: no CHECK @@ -436,7 +436,7 @@ func (p *planner) resolveFK( } func qualifyFKColErrorWithDB( - ctx context.Context, txn *client.Txn, tbl *sqlbase.TableDescriptor, col string, + ctx context.Context, txn *kv.Txn, tbl *sqlbase.TableDescriptor, col string, ) string { if txn == nil { return tree.ErrString(tree.NewUnresolvedName(tbl.Name, col)) @@ -531,7 +531,7 @@ func (p *planner) MaybeUpgradeDependentOldForeignKeyVersionTables( // This only applies for existing tables, not new tables. func ResolveFK( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, sc SchemaResolver, tbl *sqlbase.MutableTableDescriptor, d *tree.ForeignKeyConstraintTableDef, @@ -810,7 +810,7 @@ func (p *planner) addInterleave( // according to the given definition. 
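The create_sequence hunk above swaps in kv.Batch for the initial sequence write; restated as a small sketch (the helper and its parameters are illustrative):

func initSequenceValue(ctx context.Context, txn *kv.Txn, id uint32, start, increment int64) error {
	b := &kv.Batch{}
	// Store start-increment so that the first nextval() call returns start.
	b.Inc(keys.MakeSequenceKey(id), start-increment)
	return txn.Run(ctx, b)
}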
func addInterleave( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, vt SchemaResolver, desc *sqlbase.MutableTableDescriptor, index *sqlbase.IndexDescriptor, @@ -1141,7 +1141,7 @@ func dequalifyColumnRefs( // the necessary sequences in KV before calling MakeTableDesc(). func MakeTableDesc( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, vt SchemaResolver, st *cluster.Settings, n *tree.CreateTable, diff --git a/pkg/sql/database.go b/pkg/sql/database.go index c1469344e737..2a5a13d74606 100644 --- a/pkg/sql/database.go +++ b/pkg/sql/database.go @@ -16,7 +16,7 @@ import ( "sync" "github.com/cockroachdb/cockroach/pkg/config" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -74,7 +74,7 @@ func makeDatabaseDesc(p *tree.CreateDatabase) sqlbase.DatabaseDescriptor { // getDatabaseID resolves a database name into a database ID. // Returns InvalidID on failure. func getDatabaseID( - ctx context.Context, txn *client.Txn, name string, required bool, + ctx context.Context, txn *kv.Txn, name string, required bool, ) (sqlbase.ID, error) { if name == sqlbase.SystemDB.Name { return sqlbase.SystemDB.ID, nil @@ -93,7 +93,7 @@ func getDatabaseID( // returning nil if the descriptor is not found. If you want the "not // found" condition to return an error, use mustGetDatabaseDescByID() instead. func getDatabaseDescByID( - ctx context.Context, txn *client.Txn, id sqlbase.ID, + ctx context.Context, txn *kv.Txn, id sqlbase.ID, ) (*sqlbase.DatabaseDescriptor, error) { desc := &sqlbase.DatabaseDescriptor{} if err := getDescriptorByID(ctx, txn, id, desc); err != nil { @@ -105,7 +105,7 @@ func getDatabaseDescByID( // MustGetDatabaseDescByID looks up the database descriptor given its ID, // returning an error if the descriptor is not found. func MustGetDatabaseDescByID( - ctx context.Context, txn *client.Txn, id sqlbase.ID, + ctx context.Context, txn *kv.Txn, id sqlbase.ID, ) (*sqlbase.DatabaseDescriptor, error) { desc, err := getDatabaseDescByID(ctx, txn, id) if err != nil { @@ -164,7 +164,7 @@ func (dc *databaseCache) getCachedDatabaseDescByID( // if it exists in the cache, otherwise falls back to KV operations. func (dc *databaseCache) getDatabaseDesc( ctx context.Context, - txnRunner func(context.Context, func(context.Context, *client.Txn) error) error, + txnRunner func(context.Context, func(context.Context, *kv.Txn) error) error, name string, required bool, ) (*sqlbase.DatabaseDescriptor, error) { @@ -177,7 +177,7 @@ func (dc *databaseCache) getDatabaseDesc( return nil, err } if desc == nil { - if err := txnRunner(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := txnRunner(ctx, func(ctx context.Context, txn *kv.Txn) error { a := UncachedPhysicalAccessor{} desc, err = a.GetDatabaseDesc(ctx, txn, name, tree.DatabaseLookupFlags{Required: required}) @@ -195,7 +195,7 @@ func (dc *databaseCache) getDatabaseDesc( // getDatabaseDescByID returns the database descriptor given its ID // if it exists in the cache, otherwise falls back to KV operations. 
func (dc *databaseCache) getDatabaseDescByID( - ctx context.Context, txn *client.Txn, id sqlbase.ID, + ctx context.Context, txn *kv.Txn, id sqlbase.ID, ) (*sqlbase.DatabaseDescriptor, error) { desc, err := dc.getCachedDatabaseDescByID(id) if desc == nil || err != nil { @@ -212,7 +212,7 @@ func (dc *databaseCache) getDatabaseDescByID( // operations. func (dc *databaseCache) getDatabaseID( ctx context.Context, - txnRunner func(context.Context, func(context.Context, *client.Txn) error) error, + txnRunner func(context.Context, func(context.Context, *kv.Txn) error) error, name string, required bool, ) (sqlbase.ID, error) { @@ -221,7 +221,7 @@ func (dc *databaseCache) getDatabaseID( return dbID, err } if dbID == sqlbase.InvalidID { - if err := txnRunner(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := txnRunner(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error dbID, err = getDatabaseID(ctx, txn, name, required) return err @@ -285,7 +285,7 @@ func (p *planner) renameDatabase( descKey := sqlbase.MakeDescMetadataKey(descID) descDesc := sqlbase.WrapDescriptor(oldDesc) - b := &client.Batch{} + b := &kv.Batch{} if p.ExtendedEvalContext().Tracing.KVTracingEnabled() { log.VEventf(ctx, 2, "CPut %s -> %d", newKey, descID) log.VEventf(ctx, 2, "Put %s -> %s", descKey, descDesc) diff --git a/pkg/sql/database_test.go b/pkg/sql/database_test.go index 1bb73f4ff97a..ed5ad7bb17ec 100644 --- a/pkg/sql/database_test.go +++ b/pkg/sql/database_test.go @@ -17,7 +17,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -51,7 +51,7 @@ func TestDatabaseAccessors(t *testing.T) { s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(context.TODO()) - if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if _, err := getDatabaseDescByID(ctx, txn, sqlbase.SystemDB.ID); err != nil { return err } diff --git a/pkg/sql/delete_range.go b/pkg/sql/delete_range.go index 698db79e3005..28277891e5da 100644 --- a/pkg/sql/delete_range.go +++ b/pkg/sql/delete_range.go @@ -14,7 +14,7 @@ import ( "bytes" "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -248,7 +248,7 @@ func (d *deleteRangeNode) startExec(params runParams) error { } // deleteSpans adds each input span to a DelRange command in the given batch. -func (d *deleteRangeNode) deleteSpans(params runParams, b *client.Batch, spans roachpb.Spans) { +func (d *deleteRangeNode) deleteSpans(params runParams, b *kv.Batch, spans roachpb.Spans) { ctx := params.ctx traceKV := params.p.ExtendedEvalContext().Tracing.KVTracingEnabled() for _, span := range spans { @@ -264,7 +264,7 @@ func (d *deleteRangeNode) deleteSpans(params runParams, b *client.Batch, spans r // encountered during result processing, they're appended to the resumeSpans // input parameter. 
func (d *deleteRangeNode) processResults( - results []client.Result, resumeSpans []roachpb.Span, + results []kv.Result, resumeSpans []roachpb.Span, ) (roachpb.Spans, error) { for _, r := range results { var prev []byte diff --git a/pkg/sql/descriptor.go b/pkg/sql/descriptor.go index 08f6b9170bf9..4afb26669f8e 100644 --- a/pkg/sql/descriptor.go +++ b/pkg/sql/descriptor.go @@ -14,8 +14,8 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -55,9 +55,9 @@ var MaxDefaultDescriptorID = keys.MaxReservedDescID + sqlbase.ID(len(DefaultUser // GenerateUniqueDescID returns the next available Descriptor ID and increments // the counter. The incrementing is non-transactional, and the counter could be // incremented multiple times because of retries. -func GenerateUniqueDescID(ctx context.Context, db *client.DB) (sqlbase.ID, error) { +func GenerateUniqueDescID(ctx context.Context, db *kv.DB) (sqlbase.ID, error) { // Increment unique descriptor counter. - newVal, err := client.IncrementValRetryable(ctx, db, keys.DescIDGenerator, 1) + newVal, err := kv.IncrementValRetryable(ctx, db, keys.DescIDGenerator, 1) if err != nil { return sqlbase.InvalidID, err } @@ -131,7 +131,7 @@ func (p *planner) createDescriptorWithID( // mimicry. In particular, we're only writing a single key per table, while // perfect mimicry would involve writing a sentinel key for each row as well. - b := &client.Batch{} + b := &kv.Batch{} descID := descriptor.GetID() if p.ExtendedEvalContext().Tracing.KVTracingEnabled() { log.VEventf(ctx, 2, "CPut %s -> %d", idKey, descID) @@ -163,7 +163,7 @@ func (p *planner) createDescriptorWithID( // getDescriptorID looks up the ID for plainKey. // InvalidID is returned if the name cannot be resolved. func getDescriptorID( - ctx context.Context, txn *client.Txn, plainKey sqlbase.DescriptorKey, + ctx context.Context, txn *kv.Txn, plainKey sqlbase.DescriptorKey, ) (sqlbase.ID, error) { key := plainKey.Key() log.Eventf(ctx, "looking up descriptor ID for name key %q", key) @@ -179,7 +179,7 @@ func getDescriptorID( // resolveSchemaID resolves a schema's ID based on db and name. func resolveSchemaID( - ctx context.Context, txn *client.Txn, dbID sqlbase.ID, scName string, + ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, scName string, ) (bool, sqlbase.ID, error) { // Try to use the system name resolution bypass. Avoids a hotspot by explicitly // checking for public schema. @@ -201,7 +201,7 @@ func resolveSchemaID( // Returns the descriptor (if found), a bool representing whether the // descriptor was found and an error if any. func lookupDescriptorByID( - ctx context.Context, txn *client.Txn, id sqlbase.ID, + ctx context.Context, txn *kv.Txn, id sqlbase.ID, ) (sqlbase.DescriptorProto, bool, error) { var desc sqlbase.DescriptorProto for _, lookupFn := range []func() (sqlbase.DescriptorProto, error){ @@ -231,7 +231,7 @@ func lookupDescriptorByID( // In most cases you'll want to use wrappers: `getDatabaseDescByID` or // `getTableDescByID`. 
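GenerateUniqueDescID above is short enough to restate whole; the behavioural point is that the increment is non-transactional, so retries can skip IDs, which is acceptable because IDs only need to be unique, not dense (helper name illustrative, calls as in the hunk):

func nextDescID(ctx context.Context, db *kv.DB) (sqlbase.ID, error) {
	// Bump the cluster-wide descriptor ID counter with retries on ambiguous errors.
	newVal, err := kv.IncrementValRetryable(ctx, db, keys.DescIDGenerator, 1)
	if err != nil {
		return sqlbase.InvalidID, err
	}
	return sqlbase.ID(newVal), nil
}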
func getDescriptorByID( - ctx context.Context, txn *client.Txn, id sqlbase.ID, descriptor sqlbase.DescriptorProto, + ctx context.Context, txn *kv.Txn, id sqlbase.ID, descriptor sqlbase.DescriptorProto, ) error { log.Eventf(ctx, "fetching descriptor with ID %d", id) descKey := sqlbase.MakeDescMetadataKey(id) @@ -278,7 +278,7 @@ func IsDefaultCreatedDescriptor(descID sqlbase.ID) bool { // CountUserDescriptors returns the number of descriptors present that were // created by the user (i.e. not present when the cluster started). -func CountUserDescriptors(ctx context.Context, txn *client.Txn) (int, error) { +func CountUserDescriptors(ctx context.Context, txn *kv.Txn) (int, error) { allDescs, err := GetAllDescriptors(ctx, txn) if err != nil { return 0, err @@ -295,7 +295,7 @@ func CountUserDescriptors(ctx context.Context, txn *client.Txn) (int, error) { } // GetAllDescriptors looks up and returns all available descriptors. -func GetAllDescriptors(ctx context.Context, txn *client.Txn) ([]sqlbase.DescriptorProto, error) { +func GetAllDescriptors(ctx context.Context, txn *kv.Txn) ([]sqlbase.DescriptorProto, error) { log.Eventf(ctx, "fetching all descriptors") descsKey := sqlbase.MakeAllDescsMetadataKey() kvs, err := txn.Scan(ctx, descsKey, descsKey.PrefixEnd(), 0) @@ -327,7 +327,7 @@ func GetAllDescriptors(ctx context.Context, txn *client.Txn) ([]sqlbase.Descript // GetAllDatabaseDescriptorIDs looks up and returns all available database // descriptor IDs. -func GetAllDatabaseDescriptorIDs(ctx context.Context, txn *client.Txn) ([]sqlbase.ID, error) { +func GetAllDatabaseDescriptorIDs(ctx context.Context, txn *kv.Txn) ([]sqlbase.ID, error) { log.Eventf(ctx, "fetching all database descriptor IDs") nameKey := sqlbase.NewDatabaseKey("" /* name */).Key() kvs, err := txn.Scan(ctx, nameKey, nameKey.PrefixEnd(), 0 /*maxRows */) @@ -365,7 +365,7 @@ func writeDescToBatch( ctx context.Context, kvTrace bool, s *cluster.Settings, - b *client.Batch, + b *kv.Batch, descID sqlbase.ID, desc sqlbase.DescriptorProto, ) (err error) { @@ -386,7 +386,7 @@ func WriteNewDescToBatch( ctx context.Context, kvTrace bool, s *cluster.Settings, - b *client.Batch, + b *kv.Batch, tableID sqlbase.ID, desc sqlbase.DescriptorProto, ) (err error) { diff --git a/pkg/sql/descriptor_mutation_test.go b/pkg/sql/descriptor_mutation_test.go index e7ce836b8d48..ab5604b4549f 100644 --- a/pkg/sql/descriptor_mutation_test.go +++ b/pkg/sql/descriptor_mutation_test.go @@ -18,8 +18,8 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -35,12 +35,12 @@ type mutationTest struct { // SQLRunner embeds testing.TB testing.TB *sqlutils.SQLRunner - kvDB *client.DB + kvDB *kv.DB tableDesc *sqlbase.TableDescriptor } func makeMutationTest( - t *testing.T, kvDB *client.DB, db *gosql.DB, tableDesc *sqlbase.TableDescriptor, + t *testing.T, kvDB *kv.DB, db *gosql.DB, tableDesc *sqlbase.TableDescriptor, ) mutationTest { return mutationTest{ TB: t, diff --git a/pkg/sql/distsql/server.go b/pkg/sql/distsql/server.go index f7bddd3e5eb8..dfb7aeca9999 100644 --- a/pkg/sql/distsql/server.go +++ b/pkg/sql/distsql/server.go @@ -17,7 +17,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + 
"github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/colflow" @@ -219,7 +219,7 @@ func (ds *ServerImpl) setupFlow( ) monitor.Start(ctx, parentMonitor, mon.BoundAccount{}) - makeLeaf := func(req *execinfrapb.SetupFlowRequest) (*client.Txn, error) { + makeLeaf := func(req *execinfrapb.SetupFlowRequest) (*kv.Txn, error) { tis := req.LeafTxnInputState if tis == nil { // This must be a flow running for some bulk-io operation that doesn't use @@ -232,11 +232,11 @@ func (ds *ServerImpl) setupFlow( } // The flow will run in a LeafTxn because we do not want each distributed // Txn to heartbeat the transaction. - return client.NewLeafTxn(ctx, ds.FlowDB, req.Flow.Gateway, tis), nil + return kv.NewLeafTxn(ctx, ds.FlowDB, req.Flow.Gateway, tis), nil } var evalCtx *tree.EvalContext - var leafTxn *client.Txn + var leafTxn *kv.Txn if localState.EvalContext != nil { evalCtx = localState.EvalContext evalCtx.Mon = &monitor @@ -369,7 +369,7 @@ func (ds *ServerImpl) setupFlow( // that have no remote flows and also no concurrency, the txn comes from // localState.Txn. Otherwise, we create a txn based on the request's // LeafTxnInputState. - var txn *client.Txn + var txn *kv.Txn if localState.IsLocal && !f.ConcurrentExecution() { txn = localState.Txn } else { @@ -440,7 +440,7 @@ type LocalState struct { // Txn is filled in on the gateway only. It is the RootTxn that the query is running in. // This will be used directly by the flow if the flow has no concurrency and IsLocal is set. // If there is concurrency, a LeafTxn will be created. - Txn *client.Txn + Txn *kv.Txn ///////////////////////////////////////////// // Fields below are empty if IsLocal == false @@ -590,7 +590,7 @@ var _ sqlutil.InternalExecutor = &lazyInternalExecutor{} func (ie *lazyInternalExecutor) QueryRowEx( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, opts sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -602,7 +602,7 @@ func (ie *lazyInternalExecutor) QueryRowEx( } func (ie *lazyInternalExecutor) QueryRow( - ctx context.Context, opName string, txn *client.Txn, stmt string, qargs ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, stmt string, qargs ...interface{}, ) (tree.Datums, error) { ie.once.Do(func() { ie.InternalExecutor = ie.newInternalExecutor() diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index dd8e345689e3..88700ff547df 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -18,8 +18,8 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -3241,7 +3241,7 @@ func (dsp *DistSQLPlanner) createPlanForExport( // NewPlanningCtx returns a new PlanningCtx. 
func (dsp *DistSQLPlanner) NewPlanningCtx( - ctx context.Context, evalCtx *extendedEvalContext, txn *client.Txn, + ctx context.Context, evalCtx *extendedEvalContext, txn *kv.Txn, ) *PlanningCtx { planCtx := dsp.newLocalPlanningCtx(ctx, evalCtx) planCtx.spanIter = dsp.spanResolver.NewSpanResolverIterator(txn) diff --git a/pkg/sql/distsql_physical_planner_test.go b/pkg/sql/distsql_physical_planner_test.go index 484069dc89ab..8fb1f36f2a54 100644 --- a/pkg/sql/distsql_physical_planner_test.go +++ b/pkg/sql/distsql_physical_planner_test.go @@ -24,7 +24,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -605,9 +605,7 @@ type testSpanResolver struct { } // NewSpanResolverIterator is part of the SpanResolver interface. -func (tsr *testSpanResolver) NewSpanResolverIterator( - _ *client.Txn, -) physicalplan.SpanResolverIterator { +func (tsr *testSpanResolver) NewSpanResolverIterator(_ *kv.Txn) physicalplan.SpanResolverIterator { return &testSpanResolverIterator{tsr: tsr} } diff --git a/pkg/sql/distsql_plan_csv.go b/pkg/sql/distsql_plan_csv.go index fca94d94fe26..e6d638c3c989 100644 --- a/pkg/sql/distsql_plan_csv.go +++ b/pkg/sql/distsql_plan_csv.go @@ -17,9 +17,9 @@ import ( "sync/atomic" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -193,7 +193,7 @@ func presplitTableBoundaries( scatterReq := &roachpb.AdminScatterRequest{ RequestHeader: roachpb.RequestHeaderFromSpan(span), } - if _, pErr := client.SendWrapped(ctx, cfg.DB.NonTransactionalSender(), scatterReq); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, cfg.DB.NonTransactionalSender(), scatterReq); pErr != nil { log.Errorf(ctx, "failed to scatter span %s: %s", span.Key, pErr) } } diff --git a/pkg/sql/distsql_plan_ctas.go b/pkg/sql/distsql_plan_ctas.go index f77cbcd508c4..08bac1b0e5d2 100644 --- a/pkg/sql/distsql_plan_ctas.go +++ b/pkg/sql/distsql_plan_ctas.go @@ -13,7 +13,7 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/rowexec" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -25,7 +25,7 @@ func PlanAndRunCTAS( ctx context.Context, dsp *DistSQLPlanner, planner *planner, - txn *client.Txn, + txn *kv.Txn, isLocal bool, in planNode, out execinfrapb.ProcessorCoreUnion, diff --git a/pkg/sql/distsql_plan_join_test.go b/pkg/sql/distsql_plan_join_test.go index f316cbb8e8a9..d90f6c8fd38d 100644 --- a/pkg/sql/distsql_plan_join_test.go +++ b/pkg/sql/distsql_plan_join_test.go @@ -19,7 +19,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" 
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -102,7 +102,7 @@ var tableNames = map[string]bool{ // Format for any key: // ///.../#///.... -func encodeTestKey(kvDB *client.DB, keyStr string) (roachpb.Key, error) { +func encodeTestKey(kvDB *kv.DB, keyStr string) (roachpb.Key, error) { var key []byte tokens := strings.Split(keyStr, "/") @@ -131,7 +131,7 @@ func encodeTestKey(kvDB *client.DB, keyStr string) (roachpb.Key, error) { return key, nil } -func decodeTestKey(kvDB *client.DB, key roachpb.Key) (string, error) { +func decodeTestKey(kvDB *kv.DB, key roachpb.Key) (string, error) { var out []byte keyStr := roachpb.PrettyPrintKey(nil /* valDirs */, key) @@ -151,7 +151,7 @@ func decodeTestKey(kvDB *client.DB, key roachpb.Key) (string, error) { return "", err } - if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { desc, err := sqlbase.GetTableDescFromID(context.TODO(), txn, sqlbase.ID(descID)) if err != nil { return err @@ -441,7 +441,7 @@ type testPartition struct { spans [][2]string } -func makeSpanPartitions(kvDB *client.DB, testParts []testPartition) ([]SpanPartition, error) { +func makeSpanPartitions(kvDB *kv.DB, testParts []testPartition) ([]SpanPartition, error) { spanParts := make([]SpanPartition, len(testParts)) for i, testPart := range testParts { diff --git a/pkg/sql/distsql_plan_stats.go b/pkg/sql/distsql_plan_stats.go index c1b322aa13db..3ea13423c2db 100644 --- a/pkg/sql/distsql_plan_stats.go +++ b/pkg/sql/distsql_plan_stats.go @@ -14,9 +14,9 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -226,7 +226,7 @@ func (dsp *DistSQLPlanner) planAndRunCreateStats( ctx context.Context, evalCtx *extendedEvalContext, planCtx *PlanningCtx, - txn *client.Txn, + txn *kv.Txn, job *jobs.Job, resultRows *RowResultWriter, ) error { diff --git a/pkg/sql/distsql_running.go b/pkg/sql/distsql_running.go index 51d3b2c4c989..f7fd63f7f472 100644 --- a/pkg/sql/distsql_running.go +++ b/pkg/sql/distsql_running.go @@ -17,7 +17,7 @@ import ( "sync" "sync/atomic" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -282,7 +282,7 @@ func (dsp *DistSQLPlanner) setupFlows( // resources. func (dsp *DistSQLPlanner) Run( planCtx *PlanningCtx, - txn *client.Txn, + txn *kv.Txn, plan *PhysicalPlan, recv *DistSQLReceiver, evalCtx *extendedEvalContext, @@ -458,7 +458,7 @@ type DistSQLReceiver struct { // pass back LeafTxnFinalState objects via ProducerMetas. Nil if no // transaction should be updated on errors (i.e. if the flow overall // doesn't run in a transaction). - txn *client.Txn + txn *kv.Txn // A handler for clock signals arriving from remote nodes. This should update // this node's clock. 
@@ -543,7 +543,7 @@ func MakeDistSQLReceiver( stmtType tree.StatementType, rangeCache *kvcoord.RangeDescriptorCache, leaseCache *kvcoord.LeaseHolderCache, - txn *client.Txn, + txn *kv.Txn, updateClock func(observedTs hlc.Timestamp), tracing *SessionTracing, ) *DistSQLReceiver { @@ -972,7 +972,7 @@ func (dsp *DistSQLPlanner) PlanAndRun( ctx context.Context, evalCtx *extendedEvalContext, planCtx *PlanningCtx, - txn *client.Txn, + txn *kv.Txn, plan planNode, recv *DistSQLReceiver, ) (cleanup func()) { @@ -998,7 +998,7 @@ func (dsp *DistSQLPlanner) PlanAndRunPostqueries( recv *DistSQLReceiver, maybeDistribute bool, ) bool { - prevSteppingMode := planner.Txn().ConfigureStepping(ctx, client.SteppingEnabled) + prevSteppingMode := planner.Txn().ConfigureStepping(ctx, kv.SteppingEnabled) defer func() { _ = planner.Txn().ConfigureStepping(ctx, prevSteppingMode) }() for _, postqueryPlan := range postqueryPlans { // We place a sequence point before every postquery, so diff --git a/pkg/sql/distsql_running_test.go b/pkg/sql/distsql_running_test.go index 7290e051f2c7..857c93426a8c 100644 --- a/pkg/sql/distsql_running_test.go +++ b/pkg/sql/distsql_running_test.go @@ -17,7 +17,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -60,7 +60,7 @@ func TestDistSQLRunningInAbortedTxn(t *testing.T) { execCfg := s.ExecutorConfig().(ExecutorConfig) internalPlanner, cleanup := NewInternalPlanner( "test", - client.NewTxn(ctx, db, s.NodeID()), + kv.NewTxn(ctx, db, s.NodeID()), security.RootUser, &MemoryMetrics{}, &execCfg, @@ -75,7 +75,7 @@ func TestDistSQLRunningInAbortedTxn(t *testing.T) { push := func(ctx context.Context, key roachpb.Key) error { // Conflicting transaction that pushes another transaction. - conflictTxn := client.NewTxn(ctx, db, 0 /* gatewayNodeID */) + conflictTxn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) // We need to explicitly set a high priority for the push to happen. if err := conflictTxn.SetUserPriority(roachpb.MaxUserPriority); err != nil { return err @@ -101,13 +101,13 @@ func TestDistSQLRunningInAbortedTxn(t *testing.T) { }, s.DistSenderI().(*kvcoord.DistSender), ) - shortDB := client.NewDB(ambient, tsf, s.Clock()) + shortDB := kv.NewDB(ambient, tsf, s.Clock()) iter := 0 // We'll trace to make sure the test isn't fooling itself. runningCtx, getRec, cancel := tracing.ContextWithRecordingSpan(ctx, "test") defer cancel() - err = shortDB.Txn(runningCtx, func(ctx context.Context, txn *client.Txn) error { + err = shortDB.Txn(runningCtx, func(ctx context.Context, txn *kv.Txn) error { iter++ if iter == 1 { // On the first iteration, abort the txn. @@ -193,7 +193,7 @@ func TestDistSQLReceiverErrorRanking(t *testing.T) { s, _, db := serverutils.StartServer(t, base.TestServerArgs{}) defer s.Stopper().Stop(ctx) - txn := client.NewTxn(ctx, db, s.NodeID()) + txn := kv.NewTxn(ctx, db, s.NodeID()) // We're going to use a rowResultWriter to which only errors will be passed. 
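TestDistSQLRunningInAbortedTxn above builds a high-priority conflicting txn to push the transaction under test; a sketch of that pusher, where the written value and the Commit call are illustrative additions rather than lines from this patch:

func pushKey(ctx context.Context, db *kv.DB, key roachpb.Key) error {
	// A zero gateway node ID is fine for a throwaway test txn, as in the hunk above.
	conflictTxn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */)
	// Max priority guarantees the conflicting txn wins the push.
	if err := conflictTxn.SetUserPriority(roachpb.MaxUserPriority); err != nil {
		return err
	}
	if err := conflictTxn.Put(ctx, key, "pusher"); err != nil {
		return err
	}
	return conflictTxn.Commit(ctx)
}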
rw := newCallbackResultWriter(nil /* fn */) diff --git a/pkg/sql/drop_database.go b/pkg/sql/drop_database.go index 626f98026cea..e954f1296254 100644 --- a/pkg/sql/drop_database.go +++ b/pkg/sql/drop_database.go @@ -14,9 +14,9 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/config" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -165,7 +165,7 @@ func (n *dropDatabaseNode) startExec(params runParams) error { descKey := sqlbase.MakeDescMetadataKey(n.dbDesc.ID) - b := &client.Batch{} + b := &kv.Batch{} if p.ExtendedEvalContext().Tracing.KVTracingEnabled() { log.VEventf(ctx, 2, "Del %s", descKey) } diff --git a/pkg/sql/drop_test.go b/pkg/sql/drop_test.go index 1ec4e53399c2..4a892273d611 100644 --- a/pkg/sql/drop_test.go +++ b/pkg/sql/drop_test.go @@ -20,10 +20,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server" @@ -894,8 +894,8 @@ func TestDropTableDeleteData(t *testing.T) { } } -func writeTableDesc(ctx context.Context, db *client.DB, tableDesc *sqlbase.TableDescriptor) error { - return db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { +func writeTableDesc(ctx context.Context, db *kv.DB, tableDesc *sqlbase.TableDescriptor) error { + return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } diff --git a/pkg/sql/event_log.go b/pkg/sql/event_log.go index 57111a427234..5bb9387f85ef 100644 --- a/pkg/sql/event_log.go +++ b/pkg/sql/event_log.go @@ -14,7 +14,7 @@ import ( "context" "encoding/json" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/pkg/errors" ) @@ -122,7 +122,7 @@ func MakeEventLogger(execCfg *ExecutorConfig) EventLogger { // provided transaction. 
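writeTableDesc above is the canonical shape for descriptor rewrites in tests; generalized slightly here, with the Put arguments assumed from the MakeDescMetadataKey/WrapDescriptor calls elsewhere in this patch:

func writeDesc(ctx context.Context, db *kv.DB, tableDesc *sqlbase.TableDescriptor) error {
	return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		// Trigger a system-config gossip so leases and zone configs see the change.
		if err := txn.SetSystemConfigTrigger(); err != nil {
			return err
		}
		return txn.Put(ctx, sqlbase.MakeDescMetadataKey(tableDesc.ID), sqlbase.WrapDescriptor(tableDesc))
	})
}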
func (ev EventLogger) InsertEventRecord( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, eventType EventLogType, targetID, reportingID int32, info interface{}, diff --git a/pkg/sql/exec_util.go b/pkg/sql/exec_util.go index 10363263087b..f7f6caab9c51 100644 --- a/pkg/sql/exec_util.go +++ b/pkg/sql/exec_util.go @@ -28,8 +28,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -550,7 +550,7 @@ type ExecutorConfig struct { DefaultZoneConfig *zonepb.ZoneConfig Locality roachpb.Locality AmbientCtx log.AmbientContext - DB *client.DB + DB *kv.DB Gossip *gossip.Gossip DistSender *kvcoord.DistSender RPCContext *rpc.Context diff --git a/pkg/sql/execinfra/base.go b/pkg/sql/execinfra/base.go index 5bcb3e04519f..596f83071a50 100644 --- a/pkg/sql/execinfra/base.go +++ b/pkg/sql/execinfra/base.go @@ -15,7 +15,7 @@ import ( "sync" "sync/atomic" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -262,8 +262,8 @@ func SendTraceData(ctx context.Context, dst RowReceiver) { // a node, and so it's possible for multiple processors to send the same // LeafTxnFinalState. The root TxnCoordSender doesn't care if it receives the same // thing multiple times. -func GetLeafTxnFinalState(ctx context.Context, txn *client.Txn) *roachpb.LeafTxnFinalState { - if txn.Type() != client.LeafTxn { +func GetLeafTxnFinalState(ctx context.Context, txn *kv.Txn) *roachpb.LeafTxnFinalState { + if txn.Type() != kv.LeafTxn { return nil } txnMeta, err := txn.GetLeafTxnFinalState(ctx) diff --git a/pkg/sql/execinfra/flow_context.go b/pkg/sql/execinfra/flow_context.go index 173c06891484..07984895dcaa 100644 --- a/pkg/sql/execinfra/flow_context.go +++ b/pkg/sql/execinfra/flow_context.go @@ -13,7 +13,7 @@ package execinfra import ( - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -48,7 +48,7 @@ type FlowCtx struct { // must be performed. Processors in the Flow will use this txn concurrently. // This field is generally not nil, except for flows that don't run in a // higher-level txn (like backfills). - Txn *client.Txn + Txn *kv.Txn // nodeID is the ID of the node on which the processors using this FlowCtx // run. 
diff --git a/pkg/sql/execinfra/server_config.go b/pkg/sql/execinfra/server_config.go index 7aa3397ee092..ec2391c5fd85 100644 --- a/pkg/sql/execinfra/server_config.go +++ b/pkg/sql/execinfra/server_config.go @@ -15,8 +15,8 @@ package execinfra import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/diskmap" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -103,7 +103,7 @@ type ServerConfig struct { RuntimeStats RuntimeStats // DB is a handle to the cluster. - DB *client.DB + DB *kv.DB // Executor can be used to run "internal queries". Note that Flows also have // access to an executor in the EvalContext. That one is "session bound" // whereas this one isn't. @@ -113,7 +113,7 @@ type ServerConfig struct { // This DB has to be set such that it bypasses the local TxnCoordSender. We // want only the TxnCoordSender on the gateway to be involved with requests // performed by DistSQL. - FlowDB *client.DB + FlowDB *kv.DB RPCContext *rpc.Context Stopper *stop.Stopper TestingKnobs TestingKnobs diff --git a/pkg/sql/explain_bundle.go b/pkg/sql/explain_bundle.go index 3191d02fe929..c7c4a8f83a45 100644 --- a/pkg/sql/explain_bundle.go +++ b/pkg/sql/explain_bundle.go @@ -16,7 +16,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -74,7 +74,7 @@ func buildStatementBundle( statement := tree.AsString(plan.stmt.AST) description := "query support bundle" - err = db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Insert the bundle into system.statement_bundle_chunks. // TODO(radu): split in chunks. 
row, err := ie.QueryRowEx( diff --git a/pkg/sql/explain_tree_test.go b/pkg/sql/explain_tree_test.go index 5df783484ba2..2e279faca975 100644 --- a/pkg/sql/explain_tree_test.go +++ b/pkg/sql/explain_tree_test.go @@ -15,7 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -54,7 +54,7 @@ func TestPlanToTreeAndPlanToString(t *testing.T) { internalPlanner, cleanup := NewInternalPlanner( "test", - client.NewTxn(ctx, db, s.NodeID()), + kv.NewTxn(ctx, db, s.NodeID()), security.RootUser, &MemoryMetrics{}, &execCfg, diff --git a/pkg/sql/flowinfra/cluster_test.go b/pkg/sql/flowinfra/cluster_test.go index e1c32c9377b7..444ca79a0039 100644 --- a/pkg/sql/flowinfra/cluster_test.go +++ b/pkg/sql/flowinfra/cluster_test.go @@ -19,7 +19,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -94,7 +94,7 @@ func TestClusterFlow(t *testing.T) { now, 0, // maxOffset ) - txn := client.NewTxnFromProto(ctx, kvDB, tc.Server(0).NodeID(), now, client.RootTxn, &txnProto) + txn := kv.NewTxnFromProto(ctx, kvDB, tc.Server(0).NodeID(), now, kv.RootTxn, &txnProto) leafInputState := txn.GetLeafTxnInputState(ctx) tr1 := execinfrapb.TableReaderSpec{ @@ -416,9 +416,9 @@ func TestLimitedBufferingDeadlock(t *testing.T) { now, 0, // maxOffset ) - txn := client.NewTxnFromProto( + txn := kv.NewTxnFromProto( context.TODO(), tc.Server(0).DB(), tc.Server(0).NodeID(), - now, client.RootTxn, &txnProto) + now, kv.RootTxn, &txnProto) leafInputState := txn.GetLeafTxnInputState(context.TODO()) req := execinfrapb.SetupFlowRequest{ @@ -731,9 +731,9 @@ func BenchmarkInfrastructure(b *testing.B) { now, 0, // maxOffset ) - txn := client.NewTxnFromProto( + txn := kv.NewTxnFromProto( context.TODO(), tc.Server(0).DB(), tc.Server(0).NodeID(), - now, client.RootTxn, &txnProto) + now, kv.RootTxn, &txnProto) leafInputState := txn.GetLeafTxnInputState(context.TODO()) for i := range reqs { reqs[i] = execinfrapb.SetupFlowRequest{ diff --git a/pkg/sql/flowinfra/flow.go b/pkg/sql/flowinfra/flow.go index 3ff1d122c3d3..24a9420a581b 100644 --- a/pkg/sql/flowinfra/flow.go +++ b/pkg/sql/flowinfra/flow.go @@ -14,7 +14,7 @@ import ( "context" "sync" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -71,7 +71,7 @@ type Flow interface { // SetTxn is used to provide the transaction in which the flow will run. // It needs to be called after Setup() and before Start/Run. - SetTxn(*client.Txn) + SetTxn(*kv.Txn) // Start starts the flow. Processors run asynchronously in their own goroutines. // Wait() needs to be called to wait for the flow to finish. @@ -181,7 +181,7 @@ func (f *FlowBase) Setup( } // SetTxn is part of the Flow interface. 
-func (f *FlowBase) SetTxn(txn *client.Txn) { +func (f *FlowBase) SetTxn(txn *kv.Txn) { f.FlowCtx.Txn = txn f.EvalCtx.Txn = txn } diff --git a/pkg/sql/flowinfra/flow_test.go b/pkg/sql/flowinfra/flow_test.go index c1f6b5b7d286..af737f499b58 100644 --- a/pkg/sql/flowinfra/flow_test.go +++ b/pkg/sql/flowinfra/flow_test.go @@ -16,7 +16,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -46,7 +46,7 @@ func BenchmarkFlowSetup(b *testing.B) { b.RunParallel(func(pb *testing.PB) { planner, cleanup := sql.NewInternalPlanner( "test", - client.NewTxn(ctx, s.DB(), s.NodeID()), + kv.NewTxn(ctx, s.DB(), s.NodeID()), security.RootUser, &sql.MemoryMetrics{}, &execCfg, diff --git a/pkg/sql/flowinfra/server_test.go b/pkg/sql/flowinfra/server_test.go index 9d501df89e25..3946357ec6fd 100644 --- a/pkg/sql/flowinfra/server_test.go +++ b/pkg/sql/flowinfra/server_test.go @@ -18,7 +18,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -61,7 +61,7 @@ func TestServer(t *testing.T) { OutputColumns: []uint32{0, 1}, // a } - txn := client.NewTxn(ctx, kvDB, s.NodeID()) + txn := kv.NewTxn(ctx, kvDB, s.NodeID()) leafInputState := txn.GetLeafTxnInputState(ctx) req := &execinfrapb.SetupFlowRequest{ diff --git a/pkg/sql/internal.go b/pkg/sql/internal.go index 6eb755d55575..40655f3c5484 100644 --- a/pkg/sql/internal.go +++ b/pkg/sql/internal.go @@ -15,7 +15,7 @@ import ( "math" "sync" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/parser" @@ -108,7 +108,7 @@ func (ie *InternalExecutor) SetSessionData(sessionData *sessiondata.SessionData) // sd will constitute the executor's session state. func (ie *InternalExecutor) initConnEx( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, sd *sessiondata.SessionData, sdMut sessionDataMutator, syncCallback func([]resWithPos), @@ -178,7 +178,7 @@ func (ie *InternalExecutor) initConnEx( // Query is deprecated because it may transparently execute a query as root. Use // QueryEx instead. func (ie *InternalExecutor) Query( - ctx context.Context, opName string, txn *client.Txn, stmt string, qargs ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, stmt string, qargs ...interface{}, ) ([]tree.Datums, error) { return ie.QueryEx(ctx, opName, txn, ie.maybeRootSessionDataOverride(opName), stmt, qargs...) 
} @@ -191,7 +191,7 @@ func (ie *InternalExecutor) Query( func (ie *InternalExecutor) QueryEx( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, session sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -205,7 +205,7 @@ func (ie *InternalExecutor) QueryEx( func (ie *InternalExecutor) QueryWithCols( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, session sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -216,7 +216,7 @@ func (ie *InternalExecutor) QueryWithCols( func (ie *InternalExecutor) queryInternal( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, sessionDataOverride sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -233,7 +233,7 @@ func (ie *InternalExecutor) queryInternal( // // QueryRow is deprecated (like Query). Use QueryRowEx() instead. func (ie *InternalExecutor) QueryRow( - ctx context.Context, opName string, txn *client.Txn, stmt string, qargs ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, stmt string, qargs ...interface{}, ) (tree.Datums, error) { return ie.QueryRowEx(ctx, opName, txn, ie.maybeRootSessionDataOverride(opName), stmt, qargs...) } @@ -246,7 +246,7 @@ func (ie *InternalExecutor) QueryRow( func (ie *InternalExecutor) QueryRowEx( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, session sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -274,7 +274,7 @@ func (ie *InternalExecutor) QueryRowEx( // Exec is deprecated because it may transparently execute a query as root. Use // ExecEx instead. func (ie *InternalExecutor) Exec( - ctx context.Context, opName string, txn *client.Txn, stmt string, qargs ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, stmt string, qargs ...interface{}, ) (int, error) { return ie.ExecEx(ctx, opName, txn, ie.maybeRootSessionDataOverride(opName), stmt, qargs...) } @@ -287,7 +287,7 @@ func (ie *InternalExecutor) Exec( func (ie *InternalExecutor) ExecEx( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, session sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -346,7 +346,7 @@ func (ie *InternalExecutor) maybeRootSessionDataOverride( func (ie *InternalExecutor) execInternal( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, sessionDataOverride sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, diff --git a/pkg/sql/internal_test.go b/pkg/sql/internal_test.go index eeb3ac134147..3814a75bf94f 100644 --- a/pkg/sql/internal_test.go +++ b/pkg/sql/internal_test.go @@ -17,7 +17,7 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" @@ -79,7 +79,7 @@ func TestInternalExecutor(t *testing.T) { // Test the auto-retries work inside an external transaction too. In this // case, the executor cannot retry internally. 
cnt := 0 - err = s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err = s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { cnt++ row, err = ie.QueryRowEx( ctx, "test", txn, @@ -247,10 +247,10 @@ func TestInternalExecAppNameInitialization(t *testing.T) { type testInternalExecutor interface { Query( - ctx context.Context, opName string, txn *client.Txn, stmt string, qargs ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, stmt string, qargs ...interface{}, ) ([]tree.Datums, error) Exec( - ctx context.Context, opName string, txn *client.Txn, stmt string, qargs ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, stmt string, qargs ...interface{}, ) (int, error) } diff --git a/pkg/sql/join_test.go b/pkg/sql/join_test.go index 9590a7b4e0ea..c0656045e80d 100644 --- a/pkg/sql/join_test.go +++ b/pkg/sql/join_test.go @@ -13,14 +13,14 @@ package sql import ( "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/span" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" ) -func newTestScanNode(kvDB *client.DB, tableName string) (*scanNode, error) { +func newTestScanNode(kvDB *kv.DB, tableName string) (*scanNode, error) { desc := sqlbase.GetImmutableTableDescriptor(kvDB, sqlutils.TestDB, tableName) p := planner{} @@ -53,7 +53,7 @@ func newTestScanNode(kvDB *client.DB, tableName string) (*scanNode, error) { return scan, nil } -func newTestJoinNode(kvDB *client.DB, leftName, rightName string) (*joinNode, error) { +func newTestJoinNode(kvDB *kv.DB, leftName, rightName string) (*joinNode, error) { left, err := newTestScanNode(kvDB, leftName) if err != nil { return nil, err diff --git a/pkg/sql/lease.go b/pkg/sql/lease.go index b09a3fb60f3b..b652eb159c13 100644 --- a/pkg/sql/lease.go +++ b/pkg/sql/lease.go @@ -24,8 +24,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings" @@ -139,7 +139,7 @@ func storedLeaseExpiration(expiration hlc.Timestamp) tree.DTimestamp { // publishing a new version of a descriptor. Exported only for testing. 
type LeaseStore struct { nodeIDContainer *base.NodeIDContainer - db *client.DB + db *kv.DB clock *hlc.Clock internalExecutor sqlutil.InternalExecutor settings *cluster.Settings @@ -180,7 +180,7 @@ func (s LeaseStore) acquire( ctx context.Context, minExpiration hlc.Timestamp, tableID sqlbase.ID, ) (*tableVersionState, error) { var table *tableVersionState - err := s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { expiration := txn.ReadTimestamp() expiration.WallTime += int64(s.jitteredLeaseDuration()) if expiration.LessEq(minExpiration) { @@ -350,7 +350,7 @@ func (s LeaseStore) PublishMultiple( ctx context.Context, tableIDs []sqlbase.ID, update func(map[sqlbase.ID]*sqlbase.MutableTableDescriptor) error, - logEvent func(*client.Txn) error, + logEvent func(*kv.Txn) error, ) (map[sqlbase.ID]*sqlbase.ImmutableTableDescriptor, error) { errLeaseVersionChanged := errors.New("lease version changed") // Retry while getting errLeaseVersionChanged. @@ -369,7 +369,7 @@ func (s LeaseStore) PublishMultiple( tableDescs := make(map[sqlbase.ID]*sqlbase.MutableTableDescriptor) // There should be only one version of the descriptor, but it's // a race now to update to the next version. - err := s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { versions := make(map[sqlbase.ID]sqlbase.DescriptorVersion) descsToUpdate := make(map[sqlbase.ID]*sqlbase.MutableTableDescriptor) for _, id := range tableIDs { @@ -476,7 +476,7 @@ func (s LeaseStore) Publish( ctx context.Context, tableID sqlbase.ID, update func(*sqlbase.MutableTableDescriptor) error, - logEvent func(*client.Txn) error, + logEvent func(*kv.Txn) error, ) (*sqlbase.ImmutableTableDescriptor, error) { tableIDs := []sqlbase.ID{tableID} updates := func(descs map[sqlbase.ID]*sqlbase.MutableTableDescriptor) error { @@ -549,7 +549,7 @@ func (s LeaseStore) getForExpiration( ctx context.Context, expiration hlc.Timestamp, id sqlbase.ID, ) (*tableVersionState, error) { var table *tableVersionState - err := s.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { descKey := sqlbase.MakeDescMetadataKey(id) prevTimestamp := expiration.Prev() txn.SetFixedTimestamp(ctx, prevTimestamp) @@ -1106,7 +1106,7 @@ func releaseLease(lease *storedTableLease, m *LeaseManager) { // If t has no active leases, nothing is done. func purgeOldVersions( ctx context.Context, - db *client.DB, + db *kv.DB, id sqlbase.ID, takenOffline bool, minVersion sqlbase.DescriptorVersion, @@ -1409,7 +1409,7 @@ const leaseConcurrencyLimit = 5 func NewLeaseManager( ambientCtx log.AmbientContext, nodeIDContainer *base.NodeIDContainer, - db *client.DB, + db *kv.DB, clock *hlc.Clock, internalExecutor sqlutil.InternalExecutor, settings *cluster.Settings, @@ -1597,7 +1597,7 @@ func (m *LeaseManager) resolveName( tableName string, ) (sqlbase.ID, error) { id := sqlbase.InvalidID - if err := m.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := m.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { txn.SetFixedTimestamp(ctx, timestamp) var found bool var err error @@ -1740,7 +1740,7 @@ func (m *LeaseManager) findTableState(tableID sqlbase.ID, create bool) *tableSta // RefreshLeases starts a goroutine that refreshes the lease manager // leases for tables received in the latest system configuration via gossip. 
-func (m *LeaseManager) RefreshLeases(s *stop.Stopper, db *client.DB, g *gossip.Gossip) { +func (m *LeaseManager) RefreshLeases(s *stop.Stopper, db *kv.DB, g *gossip.Gossip) { ctx := context.TODO() s.RunWorker(ctx, func(ctx context.Context) { descKeyPrefix := keys.MakeTablePrefix(uint32(sqlbase.DescriptorTable.ID)) diff --git a/pkg/sql/lease_test.go b/pkg/sql/lease_test.go index d8a24a606857..d45c60456fb4 100644 --- a/pkg/sql/lease_test.go +++ b/pkg/sql/lease_test.go @@ -24,8 +24,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" @@ -49,7 +49,7 @@ type leaseTest struct { testing.TB server serverutils.TestServerInterface db *gosql.DB - kvDB *client.DB + kvDB *kv.DB nodes map[uint32]*sql.LeaseManager leaseManagerTestingKnobs sql.LeaseManagerTestingKnobs cfg *base.LeaseManagerConfig @@ -1662,7 +1662,7 @@ CREATE TABLE t.test0 (k CHAR PRIMARY KEY, v CHAR); // the transaction commit time (and that the txn commit time wasn't // bumped past it). log.Infof(ctx, "checking version %d", table.Version) - txn := client.NewTxn(ctx, t.kvDB, roachpb.NodeID(0)) + txn := kv.NewTxn(ctx, t.kvDB, roachpb.NodeID(0)) // Make the txn look back at the known modification timestamp. txn.SetFixedTimestamp(ctx, table.ModificationTime) diff --git a/pkg/sql/logical_schema_accessors.go b/pkg/sql/logical_schema_accessors.go index 41d692ab1dde..cfd012fa3e3f 100644 --- a/pkg/sql/logical_schema_accessors.go +++ b/pkg/sql/logical_schema_accessors.go @@ -13,7 +13,7 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -35,7 +35,7 @@ var _ SchemaAccessor = &LogicalSchemaAccessor{} // IsValidSchema implements the DatabaseLister interface. func (l *LogicalSchemaAccessor) IsValidSchema( - ctx context.Context, txn *client.Txn, dbID sqlbase.ID, scName string, + ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, scName string, ) (bool, sqlbase.ID, error) { if _, ok := l.vt.getVirtualSchemaEntry(scName); ok { return true, sqlbase.InvalidID, nil @@ -48,7 +48,7 @@ func (l *LogicalSchemaAccessor) IsValidSchema( // GetObjectNames implements the DatabaseLister interface. func (l *LogicalSchemaAccessor) GetObjectNames( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, dbDesc *DatabaseDescriptor, scName string, flags tree.DatabaseListFlags, @@ -72,7 +72,7 @@ func (l *LogicalSchemaAccessor) GetObjectNames( // GetObjectDesc implements the ObjectAccessor interface. 
func (l *LogicalSchemaAccessor) GetObjectDesc( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, settings *cluster.Settings, name *ObjectName, flags tree.ObjectLookupFlags, diff --git a/pkg/sql/old_foreign_key_desc_test.go b/pkg/sql/old_foreign_key_desc_test.go index 3a9a1e63f28e..3bfc8ecacc9a 100644 --- a/pkg/sql/old_foreign_key_desc_test.go +++ b/pkg/sql/old_foreign_key_desc_test.go @@ -15,7 +15,7 @@ import ( "reflect" "testing" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -104,7 +104,7 @@ CREATE INDEX ON t.t1 (x); tbl.InboundFKs = nil return tbl } - err := kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() newDesc := downgradeForeignKey(desc) if err := writeDescToBatch(ctx, false, s.ClusterSettings(), b, desc.ID, newDesc); err != nil { diff --git a/pkg/sql/physical_schema_accessors.go b/pkg/sql/physical_schema_accessors.go index c5e079e6661c..1ff442a263c6 100644 --- a/pkg/sql/physical_schema_accessors.go +++ b/pkg/sql/physical_schema_accessors.go @@ -14,7 +14,7 @@ import ( "bytes" "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -46,7 +46,7 @@ var _ SchemaAccessor = UncachedPhysicalAccessor{} // GetDatabaseDesc implements the SchemaAccessor interface. func (a UncachedPhysicalAccessor) GetDatabaseDesc( - ctx context.Context, txn *client.Txn, name string, flags tree.DatabaseLookupFlags, + ctx context.Context, txn *kv.Txn, name string, flags tree.DatabaseLookupFlags, ) (desc *DatabaseDescriptor, err error) { if name == sqlbase.SystemDB.Name { // We can't return a direct reference to SystemDB, because the @@ -75,7 +75,7 @@ func (a UncachedPhysicalAccessor) GetDatabaseDesc( // IsValidSchema implements the SchemaAccessor interface. func (a UncachedPhysicalAccessor) IsValidSchema( - ctx context.Context, txn *client.Txn, dbID sqlbase.ID, scName string, + ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, scName string, ) (bool, sqlbase.ID, error) { return resolveSchemaID(ctx, txn, dbID, scName) } @@ -83,7 +83,7 @@ func (a UncachedPhysicalAccessor) IsValidSchema( // GetObjectNames implements the SchemaAccessor interface. func (a UncachedPhysicalAccessor) GetObjectNames( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, dbDesc *DatabaseDescriptor, scName string, flags tree.DatabaseListFlags, @@ -167,7 +167,7 @@ func (a UncachedPhysicalAccessor) GetObjectNames( // GetObjectDesc implements the SchemaAccessor interface. func (a UncachedPhysicalAccessor) GetObjectDesc( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, settings *cluster.Settings, name *ObjectName, flags tree.ObjectLookupFlags, @@ -255,7 +255,7 @@ var _ SchemaAccessor = &CachedPhysicalAccessor{} // GetDatabaseDesc implements the SchemaAccessor interface. 
func (a *CachedPhysicalAccessor) GetDatabaseDesc( - ctx context.Context, txn *client.Txn, name string, flags tree.DatabaseLookupFlags, + ctx context.Context, txn *kv.Txn, name string, flags tree.DatabaseLookupFlags, ) (desc *DatabaseDescriptor, err error) { isSystemDB := name == sqlbase.SystemDB.Name if !(flags.AvoidCached || isSystemDB || testDisableTableLeases) { @@ -285,7 +285,7 @@ func (a *CachedPhysicalAccessor) GetDatabaseDesc( // IsValidSchema implements the SchemaAccessor interface. func (a *CachedPhysicalAccessor) IsValidSchema( - ctx context.Context, txn *client.Txn, dbID sqlbase.ID, scName string, + ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, scName string, ) (bool, sqlbase.ID, error) { return a.tc.resolveSchemaID(ctx, txn, dbID, scName) } @@ -293,7 +293,7 @@ func (a *CachedPhysicalAccessor) IsValidSchema( // GetObjectDesc implements the SchemaAccessor interface. func (a *CachedPhysicalAccessor) GetObjectDesc( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, settings *cluster.Settings, name *ObjectName, flags tree.ObjectLookupFlags, diff --git a/pkg/sql/physicalplan/aggregator_funcs_test.go b/pkg/sql/physicalplan/aggregator_funcs_test.go index c1ed8a28662e..72d1435427f8 100644 --- a/pkg/sql/physicalplan/aggregator_funcs_test.go +++ b/pkg/sql/physicalplan/aggregator_funcs_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/distsql" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -53,7 +53,7 @@ var ( func runTestFlow( t *testing.T, srv serverutils.TestServerInterface, - txn *client.Txn, + txn *kv.Txn, procs ...execinfrapb.ProcessorSpec, ) sqlbase.EncDatumRows { distSQLSrv := srv.DistSQLServer().(*distsql.ServerImpl) @@ -152,7 +152,7 @@ func checkDistAggregationInfo( } } - txn := client.NewTxn(ctx, srv.DB(), srv.NodeID()) + txn := kv.NewTxn(ctx, srv.DB(), srv.NodeID()) // First run a flow that aggregates all the rows without any local stages. diff --git a/pkg/sql/physicalplan/fake_span_resolver.go b/pkg/sql/physicalplan/fake_span_resolver.go index fab3c245c6f0..767527275a76 100644 --- a/pkg/sql/physicalplan/fake_span_resolver.go +++ b/pkg/sql/physicalplan/fake_span_resolver.go @@ -15,8 +15,8 @@ import ( "context" "math/rand" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -54,7 +54,7 @@ type fakeSpanResolverIterator struct { // scans are performed in the context of this txn - the same one using the // results of the resolver - so that using the resolver doesn't introduce // conflicts. - txn *client.Txn + txn *kv.Txn err error // ranges are ordered by the key; the start key of the first one is the @@ -64,7 +64,7 @@ type fakeSpanResolverIterator struct { } // NewSpanResolverIterator is part of the SpanResolver interface. 
-func (fsr *fakeSpanResolver) NewSpanResolverIterator(txn *client.Txn) SpanResolverIterator { +func (fsr *fakeSpanResolver) NewSpanResolverIterator(txn *kv.Txn) SpanResolverIterator { return &fakeSpanResolverIterator{fsr: fsr, txn: txn} } diff --git a/pkg/sql/physicalplan/fake_span_resolver_test.go b/pkg/sql/physicalplan/fake_span_resolver_test.go index 8ea6a09f1da0..2d8f96e4a652 100644 --- a/pkg/sql/physicalplan/fake_span_resolver_test.go +++ b/pkg/sql/physicalplan/fake_span_resolver_test.go @@ -15,8 +15,8 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -50,7 +50,7 @@ func TestFakeSpanResolver(t *testing.T) { db := tc.Server(0).DB() - txn := client.NewTxn(ctx, db, tc.Server(0).NodeID()) + txn := kv.NewTxn(ctx, db, tc.Server(0).NodeID()) it := resolver.NewSpanResolverIterator(txn) tableDesc := sqlbase.GetTableDescriptor(db, "test", "t") diff --git a/pkg/sql/physicalplan/replicaoracle/oracle.go b/pkg/sql/physicalplan/replicaoracle/oracle.go index 0a9ce82efaf5..9f9aca7bfee2 100644 --- a/pkg/sql/physicalplan/replicaoracle/oracle.go +++ b/pkg/sql/physicalplan/replicaoracle/oracle.go @@ -18,7 +18,7 @@ import ( "math/rand" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -70,7 +70,7 @@ type Oracle interface { // OracleFactory creates an oracle for a Txn. type OracleFactory interface { - Oracle(*client.Txn) Oracle + Oracle(*kv.Txn) Oracle } // OracleFactoryFunc creates an OracleFactory from a Config. @@ -126,7 +126,7 @@ func newRandomOracleFactory(cfg Config) OracleFactory { return &randomOracle{gossip: cfg.Gossip} } -func (o *randomOracle) Oracle(_ *client.Txn) Oracle { +func (o *randomOracle) Oracle(_ *kv.Txn) Oracle { return o } @@ -156,7 +156,7 @@ func newClosestOracleFactory(cfg Config) OracleFactory { } } -func (o *closestOracle) Oracle(_ *client.Txn) Oracle { +func (o *closestOracle) Oracle(_ *kv.Txn) Oracle { return o } @@ -208,7 +208,7 @@ func newBinPackingOracleFactory(cfg Config) OracleFactory { var _ OracleFactory = &binPackingOracle{} -func (o *binPackingOracle) Oracle(_ *client.Txn) Oracle { +func (o *binPackingOracle) Oracle(_ *kv.Txn) Oracle { return o } diff --git a/pkg/sql/physicalplan/span_resolver.go b/pkg/sql/physicalplan/span_resolver.go index 766d566b3d52..4b2a9516ae24 100644 --- a/pkg/sql/physicalplan/span_resolver.go +++ b/pkg/sql/physicalplan/span_resolver.go @@ -14,8 +14,8 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" @@ -63,7 +63,7 @@ import ( type SpanResolver interface { // NewSpanResolverIterator creates a new SpanResolverIterator. // Txn is used for testing and for determining if follower reads are possible. 
- NewSpanResolverIterator(txn *client.Txn) SpanResolverIterator + NewSpanResolverIterator(txn *kv.Txn) SpanResolverIterator } // SpanResolverIterator is used to iterate over the ranges composing a key span. @@ -172,7 +172,7 @@ type spanResolverIterator struct { var _ SpanResolverIterator = &spanResolverIterator{} // NewSpanResolverIterator creates a new SpanResolverIterator. -func (sr *spanResolver) NewSpanResolverIterator(txn *client.Txn) SpanResolverIterator { +func (sr *spanResolver) NewSpanResolverIterator(txn *kv.Txn) SpanResolverIterator { return &spanResolverIterator{ gossip: sr.gossip, it: kvcoord.NewRangeIterator(sr.distSender), diff --git a/pkg/sql/physicalplan/span_resolver_test.go b/pkg/sql/physicalplan/span_resolver_test.go index b45e65b3da10..31fbee285922 100644 --- a/pkg/sql/physicalplan/span_resolver_test.go +++ b/pkg/sql/physicalplan/span_resolver_test.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" @@ -310,7 +310,7 @@ func TestMixedDirections(t *testing.T) { } func setupRanges( - db *gosql.DB, s *server.TestServer, cdb *client.DB, t *testing.T, + db *gosql.DB, s *server.TestServer, cdb *kv.DB, t *testing.T, ) ([]roachpb.RangeDescriptor, *sqlbase.TableDescriptor) { if _, err := db.Exec(`CREATE DATABASE t`); err != nil { t.Fatal(err) diff --git a/pkg/sql/plan.go b/pkg/sql/plan.go index ba496a629a4a..71e5799cf932 100644 --- a/pkg/sql/plan.go +++ b/pkg/sql/plan.go @@ -14,7 +14,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/opt/memo" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -389,7 +389,7 @@ func startExec(params runParams, plan planNode) error { }, leaveNode: func(_ string, n planNode) (err error) { if _, ok := n.(planNodeReadingOwnWrites); ok { - prevMode := params.p.Txn().ConfigureStepping(params.ctx, client.SteppingDisabled) + prevMode := params.p.Txn().ConfigureStepping(params.ctx, kv.SteppingDisabled) defer func() { _ = params.p.Txn().ConfigureStepping(params.ctx, prevMode) }() } return n.startExec(params) diff --git a/pkg/sql/planner.go b/pkg/sql/planner.go index 950c0ceded1c..c72c6bc755e8 100644 --- a/pkg/sql/planner.go +++ b/pkg/sql/planner.go @@ -15,8 +15,8 @@ import ( "fmt" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/sql/opt/exec" "github.com/cockroachdb/cockroach/pkg/sql/parser" @@ -114,7 +114,7 @@ type schemaInterface struct { // planners are usually created by using the newPlanner method on a Session. // If one needs to be created outside of a Session, use makeInternalPlanner(). type planner struct { - txn *client.Txn + txn *kv.Txn // Reference to the corresponding sql Statement for this query. stmt *Statement @@ -208,7 +208,7 @@ var noteworthyInternalMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NO // NewInternalPlanner is an exported version of newInternalPlanner. 
It // returns an interface{} so it can be used outside of the sql package. func NewInternalPlanner( - opName string, txn *client.Txn, user string, memMetrics *MemoryMetrics, execCfg *ExecutorConfig, + opName string, txn *kv.Txn, user string, memMetrics *MemoryMetrics, execCfg *ExecutorConfig, ) (interface{}, func()) { return newInternalPlanner(opName, txn, user, memMetrics, execCfg) } @@ -222,7 +222,7 @@ func NewInternalPlanner( // Returns a cleanup function that must be called once the caller is done with // the planner. func newInternalPlanner( - opName string, txn *client.Txn, user string, memMetrics *MemoryMetrics, execCfg *ExecutorConfig, + opName string, txn *kv.Txn, user string, memMetrics *MemoryMetrics, execCfg *ExecutorConfig, ) (*planner, func()) { // We need a context that outlives all the uses of the planner (since the // planner captures it in the EvalCtx, and so does the cleanup function that @@ -325,7 +325,7 @@ func internalExtendedEvalCtx( sd *sessiondata.SessionData, dataMutator *sessionDataMutator, tables *TableCollection, - txn *client.Txn, + txn *kv.Txn, txnTimestamp time.Time, stmtTimestamp time.Time, execCfg *ExecutorConfig, @@ -404,7 +404,7 @@ func (p *planner) LeaseMgr() *LeaseManager { return p.Tables().leaseMgr } -func (p *planner) Txn() *client.Txn { +func (p *planner) Txn() *kv.Txn { return p.txn } diff --git a/pkg/sql/privileged_accessor_test.go b/pkg/sql/privileged_accessor_test.go index 5e94b889cc7f..b1a695c80dee 100644 --- a/pkg/sql/privileged_accessor_test.go +++ b/pkg/sql/privileged_accessor_test.go @@ -14,7 +14,7 @@ import ( "context" "testing" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/tests" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -33,7 +33,7 @@ func TestLookupNamespaceIDFallback(t *testing.T) { s, sqlDB, kvDB := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) - err := kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return txn.Put( ctx, sqlbase.NewDeprecatedTableKey(999, "bob").Key(), diff --git a/pkg/sql/relocate.go b/pkg/sql/relocate.go index 1fb4a89fbb5b..a33630064af5 100644 --- a/pkg/sql/relocate.go +++ b/pkg/sql/relocate.go @@ -14,8 +14,8 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -145,7 +145,7 @@ func (n *relocateNode) Close(ctx context.Context) { } func lookupRangeDescriptor( - ctx context.Context, db *client.DB, rowKey []byte, + ctx context.Context, db *kv.DB, rowKey []byte, ) (roachpb.RangeDescriptor, error) { startKey := keys.RangeMetaKey(keys.MustAddr(rowKey)) endKey := keys.Meta2Prefix.PrefixEnd() diff --git a/pkg/sql/rename_table.go b/pkg/sql/rename_table.go index 5bd06c7f8530..eddc6c130d2e 100644 --- a/pkg/sql/rename_table.go +++ b/pkg/sql/rename_table.go @@ -14,7 +14,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" 
@@ -132,7 +132,7 @@ func (n *renameTableNode) startExec(params runParams) error { // We update the descriptor to the new name, but also leave the mapping of the // old name to the id, so that the name is not reused until the schema changer // has made sure it's not in use any more. - b := &client.Batch{} + b := &kv.Batch{} if p.extendedEvalCtx.Tracing.KVTracingEnabled() { log.VEventf(ctx, 2, "CPut %s -> %d", newTbKey, descID) } diff --git a/pkg/sql/resolver.go b/pkg/sql/resolver.go index e65b2ded8f49..e473e69cd31d 100644 --- a/pkg/sql/resolver.go +++ b/pkg/sql/resolver.go @@ -14,7 +14,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -39,7 +39,7 @@ type SchemaResolver interface { tree.TableNameExistingResolver tree.TableNameTargetResolver - Txn() *client.Txn + Txn() *kv.Txn LogicalSchemaAccessor() SchemaAccessor CurrentDatabase() string CurrentSearchPath() sessiondata.SearchPath @@ -67,7 +67,7 @@ func (p *planner) ResolveUncachedDatabaseByName( // explicit schema and catalog name. func GetObjectNames( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, sc SchemaResolver, dbDesc *DatabaseDescriptor, scName string, @@ -397,7 +397,7 @@ func (p *planner) getQualifiedTableName( // returned will be nil. func findTableContainingIndex( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, sc SchemaResolver, dbName, scName string, idxName tree.UnrestrictedName, @@ -467,7 +467,7 @@ func expandMutableIndexName( func expandIndexName( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, sc SchemaResolver, index *tree.TableIndexName, requireTable bool, diff --git a/pkg/sql/revert.go b/pkg/sql/revert.go index b4f5b5e8e794..83031d44445a 100644 --- a/pkg/sql/revert.go +++ b/pkg/sql/revert.go @@ -13,7 +13,7 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -30,7 +30,7 @@ const RevertTableDefaultBatchSize = 500000 // RevertTables reverts the passed table to the target time. func RevertTables( ctx context.Context, - db *client.DB, + db *kv.DB, tables []*sqlbase.TableDescriptor, targetTime hlc.Timestamp, batchSize int64, @@ -77,7 +77,7 @@ func RevertTables( // parallel (since we're passing a key limit, distsender won't do its usual // splitting/parallel sending to separate ranges). for len(spans) != 0 { - var b client.Batch + var b kv.Batch for _, span := range spans { b.AddRawRequest(&roachpb.RevertRangeRequest{ RequestHeader: roachpb.RequestHeader{ diff --git a/pkg/sql/row/cascader.go b/pkg/sql/row/cascader.go index ac7a673b42f7..095ab8f0db66 100644 --- a/pkg/sql/row/cascader.go +++ b/pkg/sql/row/cascader.go @@ -13,7 +13,7 @@ package row import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -30,7 +30,7 @@ import ( // cascader is used to handle all referential integrity cascading actions. 
type cascader struct { - txn *client.Txn + txn *kv.Txn fkTables FkTableMetadata alloc *sqlbase.DatumAlloc evalCtx *tree.EvalContext @@ -53,7 +53,7 @@ type cascader struct { // a possible cascade. It returns a cascader if one is required and nil if not. func makeDeleteCascader( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, table *sqlbase.ImmutableTableDescriptor, tablesByID FkTableMetadata, evalCtx *tree.EvalContext, @@ -107,7 +107,7 @@ func makeDeleteCascader( // it will also enable any interleaved read part to observe the // mutation, and thus introduce the risk of a Halloween problem for // any mutation that uses FK relationships. - _ = txn.ConfigureStepping(ctx, client.SteppingDisabled) + _ = txn.ConfigureStepping(ctx, kv.SteppingDisabled) return &cascader{ txn: txn, @@ -129,7 +129,7 @@ func makeDeleteCascader( // a possible cascade. It returns a cascader if one is required and nil if not. func makeUpdateCascader( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, table *sqlbase.ImmutableTableDescriptor, tablesByID FkTableMetadata, updateCols []sqlbase.ColumnDescriptor, @@ -198,7 +198,7 @@ func makeUpdateCascader( // it will also enable any interleaved read part to observe the // mutation, and thus introduce the risk of a Halloween problem for // any mutation that uses FK relationships. - _ = txn.ConfigureStepping(ctx, client.SteppingDisabled) + _ = txn.ConfigureStepping(ctx, kv.SteppingDisabled) return &cascader{ txn: txn, diff --git a/pkg/sql/row/deleter.go b/pkg/sql/row/deleter.go index 1aebb8763ef4..7b70baa7f784 100644 --- a/pkg/sql/row/deleter.go +++ b/pkg/sql/row/deleter.go @@ -13,8 +13,8 @@ package row import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -39,7 +39,7 @@ type Deleter struct { // passed in requestedCols will be included in FetchCols. func MakeDeleter( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, fkTables FkTableMetadata, requestedCols []sqlbase.ColumnDescriptor, @@ -67,7 +67,7 @@ func MakeDeleter( // additional cascader. func makeRowDeleterWithoutCascader( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, fkTables FkTableMetadata, requestedCols []sqlbase.ColumnDescriptor, @@ -130,11 +130,7 @@ func makeRowDeleterWithoutCascader( // orphaned rows. The bytesMonitor is only used if cascading/fk checking and can // be nil if not. func (rd *Deleter) DeleteRow( - ctx context.Context, - b *client.Batch, - values []tree.Datum, - checkFKs checkFKConstraints, - traceKV bool, + ctx context.Context, b *kv.Batch, values []tree.Datum, checkFKs checkFKConstraints, traceKV bool, ) error { // Delete the row from any secondary indices. @@ -199,11 +195,7 @@ func (rd *Deleter) DeleteRow( // DeleteIndexRow adds to the batch the kv operations necessary to delete a // table row from the given index. 
func (rd *Deleter) DeleteIndexRow( - ctx context.Context, - b *client.Batch, - idx *sqlbase.IndexDescriptor, - values []tree.Datum, - traceKV bool, + ctx context.Context, b *kv.Batch, idx *sqlbase.IndexDescriptor, values []tree.Datum, traceKV bool, ) error { if rd.Fks.checker != nil { if err := rd.Fks.addAllIdxChecks(ctx, values, traceKV); err != nil { diff --git a/pkg/sql/row/errors.go b/pkg/sql/row/errors.go index 3bdd028202f4..df74e531462e 100644 --- a/pkg/sql/row/errors.go +++ b/pkg/sql/row/errors.go @@ -14,7 +14,7 @@ import ( "context" "strings" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -47,7 +47,7 @@ func (f *singleKVFetcher) GetRangesInfo() []roachpb.RangeInfo { // ConvertBatchError returns a user friendly constraint violation error. func ConvertBatchError( - ctx context.Context, tableDesc *sqlbase.ImmutableTableDescriptor, b *client.Batch, + ctx context.Context, tableDesc *sqlbase.ImmutableTableDescriptor, b *kv.Batch, ) error { origPErr := b.MustPErr() if origPErr.Index == nil { diff --git a/pkg/sql/row/fetcher.go b/pkg/sql/row/fetcher.go index bcc18d72b46f..59e995f3dd55 100644 --- a/pkg/sql/row/fetcher.go +++ b/pkg/sql/row/fetcher.go @@ -17,8 +17,8 @@ import ( "strings" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/scrub" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -458,7 +458,7 @@ func (rf *Fetcher) Init( // times. func (rf *Fetcher) StartScan( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, spans roachpb.Spans, limitBatches bool, limitHint int64, @@ -495,7 +495,7 @@ func (rf *Fetcher) StartScan( // Can be used multiple times. func (rf *Fetcher) StartInconsistentScan( ctx context.Context, - db *client.DB, + db *kv.DB, initialTimestamp hlc.Timestamp, maxTimestampAge time.Duration, spans roachpb.Spans, @@ -515,7 +515,7 @@ func (rf *Fetcher) StartInconsistentScan( maxTimestampAge, ) } - txn := client.NewTxnWithSteppingEnabled(ctx, db, 0 /* gatewayNodeID */) + txn := kv.NewTxnWithSteppingEnabled(ctx, db, 0 /* gatewayNodeID */) txn.SetFixedTimestamp(ctx, txnTimestamp) if log.V(1) { log.Infof(ctx, "starting inconsistent scan at timestamp %v", txnTimestamp) @@ -530,7 +530,7 @@ func (rf *Fetcher) StartInconsistentScan( // Advance the timestamp by the time that passed. 
txnTimestamp = txnTimestamp.Add(now.Sub(txnStartTime).Nanoseconds(), 0 /* logical */) txnStartTime = now - txn = client.NewTxnWithSteppingEnabled(ctx, db, 0 /* gatewayNodeID */) + txn = kv.NewTxnWithSteppingEnabled(ctx, db, 0 /* gatewayNodeID */) txn.SetFixedTimestamp(ctx, txnTimestamp) if log.V(1) { diff --git a/pkg/sql/row/fetcher_test.go b/pkg/sql/row/fetcher_test.go index ba4feb530711..9ba4ea64494e 100644 --- a/pkg/sql/row/fetcher_test.go +++ b/pkg/sql/row/fetcher_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -165,7 +165,7 @@ func TestNextRowSingle(t *testing.T) { if err := rf.StartScan( context.TODO(), - client.NewTxn(ctx, kvDB, 0), + kv.NewTxn(ctx, kvDB, 0), roachpb.Spans{tableDesc.IndexSpan(tableDesc.PrimaryIndex.ID)}, false, /*limitBatches*/ 0, /*limitHint*/ @@ -285,7 +285,7 @@ func TestNextRowBatchLimiting(t *testing.T) { if err := rf.StartScan( context.TODO(), - client.NewTxn(ctx, kvDB, 0), + kv.NewTxn(ctx, kvDB, 0), roachpb.Spans{tableDesc.IndexSpan(tableDesc.PrimaryIndex.ID)}, true, /*limitBatches*/ 10, /*limitHint*/ @@ -413,7 +413,7 @@ INDEX(c) if err := rf.StartScan( context.TODO(), - client.NewTxn(ctx, kvDB, 0), + kv.NewTxn(ctx, kvDB, 0), roachpb.Spans{indexSpan, roachpb.Span{Key: midKey, EndKey: endKey}, }, @@ -577,7 +577,7 @@ func TestNextRowSecondaryIndex(t *testing.T) { if err := rf.StartScan( context.TODO(), - client.NewTxn(ctx, kvDB, 0), + kv.NewTxn(ctx, kvDB, 0), roachpb.Spans{tableDesc.IndexSpan(tableDesc.Indexes[0].ID)}, false, /*limitBatches*/ 0, /*limitHint*/ @@ -938,7 +938,7 @@ func TestNextRowInterleaved(t *testing.T) { if err := rf.StartScan( context.TODO(), - client.NewTxn(ctx, kvDB, 0), + kv.NewTxn(ctx, kvDB, 0), lookupSpans, false, /*limitBatches*/ 0, /*limitHint*/ diff --git a/pkg/sql/row/fk_existence_base.go b/pkg/sql/row/fk_existence_base.go index 8716035e31cc..4c3119b46889 100644 --- a/pkg/sql/row/fk_existence_base.go +++ b/pkg/sql/row/fk_existence_base.go @@ -13,7 +13,7 @@ package row import ( "sort" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -30,7 +30,7 @@ import ( // txn should be passed as argument. type fkExistenceCheckBaseHelper struct { // txn is the current KV transaction. - txn *client.Txn + txn *kv.Txn // dir indicates the direction of the check. // @@ -117,7 +117,7 @@ type fkExistenceCheckBaseHelper struct { // TODO(knz): this should become homogeneous across the 3 packages // sql, sqlbase, row. The proliferation is annoying. 
func makeFkExistenceCheckBaseHelper( - txn *client.Txn, + txn *kv.Txn, otherTables FkTableMetadata, ref *sqlbase.ForeignKeyConstraint, searchIdx *sqlbase.IndexDescriptor, diff --git a/pkg/sql/row/fk_existence_batch.go b/pkg/sql/row/fk_existence_batch.go index 2eaa28f40d79..e127440b3554 100644 --- a/pkg/sql/row/fk_existence_batch.go +++ b/pkg/sql/row/fk_existence_batch.go @@ -13,7 +13,7 @@ package row import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -31,7 +31,7 @@ type fkExistenceBatchChecker struct { // // TODO(knz): Don't do this. txn objects, like contexts, // should not be captured in structs. - txn *client.Txn + txn *kv.Txn // batch is the accumulated batch of existence checks so far. batch roachpb.BatchRequest diff --git a/pkg/sql/row/fk_existence_delete.go b/pkg/sql/row/fk_existence_delete.go index f196bdc2777c..a8acc6a409d8 100644 --- a/pkg/sql/row/fk_existence_delete.go +++ b/pkg/sql/row/fk_existence_delete.go @@ -13,7 +13,7 @@ package row import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/errors" @@ -35,7 +35,7 @@ type fkExistenceCheckForDelete struct { // makeFkExistenceCheckHelperForDelete instantiates a delete helper. func makeFkExistenceCheckHelperForDelete( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, table *sqlbase.ImmutableTableDescriptor, otherTables FkTableMetadata, colMap map[sqlbase.ColumnID]int, @@ -114,7 +114,7 @@ func makeFkExistenceCheckHelperForDelete( // it will also enable any interleaved read part to observe the // mutation, and thus introduce the risk of a Halloween problem for // any mutation that uses FK relationships. - _ = txn.ConfigureStepping(ctx, client.SteppingDisabled) + _ = txn.ConfigureStepping(ctx, kv.SteppingDisabled) } return h, nil diff --git a/pkg/sql/row/fk_existence_insert.go b/pkg/sql/row/fk_existence_insert.go index 98f1c6266df0..e784ea6920fa 100644 --- a/pkg/sql/row/fk_existence_insert.go +++ b/pkg/sql/row/fk_existence_insert.go @@ -13,7 +13,7 @@ package row import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/errors" @@ -44,7 +44,7 @@ type fkExistenceCheckForInsert struct { // makeFkExistenceCheckHelperForInsert instantiates an insert helper. func makeFkExistenceCheckHelperForInsert( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, table *sqlbase.ImmutableTableDescriptor, otherTables FkTableMetadata, colMap map[sqlbase.ColumnID]int, @@ -105,7 +105,7 @@ func makeFkExistenceCheckHelperForInsert( // it will also enable any interleaved read part to observe the // mutation, and thus introduce the risk of a Halloween problem for // any mutation that uses FK relationships. 
- _ = txn.ConfigureStepping(ctx, client.SteppingDisabled) + _ = txn.ConfigureStepping(ctx, kv.SteppingDisabled) } return h, nil diff --git a/pkg/sql/row/fk_existence_update.go b/pkg/sql/row/fk_existence_update.go index ed7674d06ec7..0aba15b8fe4e 100644 --- a/pkg/sql/row/fk_existence_update.go +++ b/pkg/sql/row/fk_existence_update.go @@ -13,7 +13,7 @@ package row import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" ) @@ -63,7 +63,7 @@ type fkExistenceCheckForUpdate struct { // makeFkExistenceCheckHelperForUpdate instantiates an update helper. func makeFkExistenceCheckHelperForUpdate( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, table *sqlbase.ImmutableTableDescriptor, otherTables FkTableMetadata, updateCols []sqlbase.ColumnDescriptor, diff --git a/pkg/sql/row/inserter.go b/pkg/sql/row/inserter.go index bfebcfc5f526..1a67371fcbeb 100644 --- a/pkg/sql/row/inserter.go +++ b/pkg/sql/row/inserter.go @@ -14,7 +14,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -41,7 +41,7 @@ type Inserter struct { // insertCols must contain every column in the primary key. func MakeInserter( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, insertCols []sqlbase.ColumnDescriptor, checkFKs checkFKConstraints, diff --git a/pkg/sql/row/kv_batch_fetcher.go b/pkg/sql/row/kv_batch_fetcher.go index 4b5ab8bc9790..6d8ab11e7deb 100644 --- a/pkg/sql/row/kv_batch_fetcher.go +++ b/pkg/sql/row/kv_batch_fetcher.go @@ -15,7 +15,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -172,7 +172,7 @@ func (f *txnKVFetcher) getKeyLockingStrength() lock.Strength { // // Batch limits can only be used if the spans are ordered. func makeKVBatchFetcher( - txn *client.Txn, + txn *kv.Txn, spans roachpb.Spans, reverse bool, useBatchLimit bool, diff --git a/pkg/sql/row/kv_fetcher.go b/pkg/sql/row/kv_fetcher.go index 7743a40f0436..6f861710ba51 100644 --- a/pkg/sql/row/kv_fetcher.go +++ b/pkg/sql/row/kv_fetcher.go @@ -13,7 +13,7 @@ package row import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" @@ -34,7 +34,7 @@ type KVFetcher struct { // NewKVFetcher creates a new KVFetcher. 
func NewKVFetcher( - txn *client.Txn, + txn *kv.Txn, spans roachpb.Spans, reverse bool, useBatchLimit bool, diff --git a/pkg/sql/row/updater.go b/pkg/sql/row/updater.go index e7883058543a..7dc93e67240c 100644 --- a/pkg/sql/row/updater.go +++ b/pkg/sql/row/updater.go @@ -14,8 +14,8 @@ import ( "bytes" "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -73,7 +73,7 @@ const ( // passed in requestedCols will be included in FetchCols at the beginning. func MakeUpdater( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, fkTables FkTableMetadata, updateCols []sqlbase.ColumnDescriptor, @@ -110,7 +110,7 @@ var returnTruePseudoError error = returnTrue{} // create a cascader. func makeUpdaterWithoutCascader( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tableDesc *sqlbase.ImmutableTableDescriptor, fkTables FkTableMetadata, updateCols []sqlbase.ColumnDescriptor, @@ -296,7 +296,7 @@ func makeUpdaterWithoutCascader( // The return value is only good until the next call to UpdateRow. func (ru *Updater) UpdateRow( ctx context.Context, - batch *client.Batch, + batch *kv.Batch, oldValues []tree.Datum, updateValues []tree.Datum, checkFKs checkFKConstraints, diff --git a/pkg/sql/rowexec/backfiller.go b/pkg/sql/rowexec/backfiller.go index ea957179b576..4c0bda87f3c6 100644 --- a/pkg/sql/rowexec/backfiller.go +++ b/pkg/sql/rowexec/backfiller.go @@ -15,9 +15,9 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/backfill" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -236,7 +236,7 @@ func (b *backfiller) mainLoop( func GetResumeSpans( ctx context.Context, jobsRegistry *jobs.Registry, - txn *client.Txn, + txn *kv.Txn, tableID sqlbase.ID, mutationID sqlbase.MutationID, filter backfill.MutationFilter, @@ -296,7 +296,7 @@ func GetResumeSpans( // SetResumeSpansInJob adds a list of resume spans into a job details field. func SetResumeSpansInJob( - ctx context.Context, spans []roachpb.Span, mutationIdx int, txn *client.Txn, job *jobs.Job, + ctx context.Context, spans []roachpb.Span, mutationIdx int, txn *kv.Txn, job *jobs.Job, ) error { details, ok := job.Details().(jobspb.SchemaChangeDetails) if !ok { @@ -311,7 +311,7 @@ func SetResumeSpansInJob( // resume is the left over work from origSpan. 
func WriteResumeSpan( ctx context.Context, - db *client.DB, + db *kv.DB, id sqlbase.ID, mutationID sqlbase.MutationID, filter backfill.MutationFilter, @@ -321,7 +321,7 @@ func WriteResumeSpan( ctx, traceSpan := tracing.ChildSpan(ctx, "checkpoint") defer tracing.FinishSpan(traceSpan) - return db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { resumeSpans, job, mutationIdx, error := GetResumeSpans(ctx, jobsRegistry, txn, id, mutationID, filter) if error != nil { return error diff --git a/pkg/sql/rowexec/backfiller_test.go b/pkg/sql/rowexec/backfiller_test.go index 31a90cace6a8..a9a52233b6a3 100644 --- a/pkg/sql/rowexec/backfiller_test.go +++ b/pkg/sql/rowexec/backfiller_test.go @@ -15,9 +15,9 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/backfill" @@ -174,7 +174,7 @@ func TestWriteResumeSpan(t *testing.T) { } var got []roachpb.Span - if err := kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error got, _, _, err = rowexec.GetResumeSpans( ctx, registry, txn, tableDesc.ID, mutationID, backfill.IndexMutationFilter) diff --git a/pkg/sql/rowexec/columnbackfiller.go b/pkg/sql/rowexec/columnbackfiller.go index 8d5c969c1e8a..26b832b008e2 100644 --- a/pkg/sql/rowexec/columnbackfiller.go +++ b/pkg/sql/rowexec/columnbackfiller.go @@ -13,7 +13,7 @@ package rowexec import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/backfill" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -87,7 +87,7 @@ func (cb *columnBackfiller) runChunk( readAsOf hlc.Timestamp, ) (roachpb.Key, error) { var key roachpb.Key - err := cb.flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := cb.flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if cb.flowCtx.Cfg.TestingKnobs.RunBeforeBackfillChunk != nil { if err := cb.flowCtx.Cfg.TestingKnobs.RunBeforeBackfillChunk(sp); err != nil { return err diff --git a/pkg/sql/rowexec/index_skip_table_reader_test.go b/pkg/sql/rowexec/index_skip_table_reader_test.go index ff23852ea536..805098164300 100644 --- a/pkg/sql/rowexec/index_skip_table_reader_test.go +++ b/pkg/sql/rowexec/index_skip_table_reader_test.go @@ -17,7 +17,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -427,7 +427,7 @@ func TestIndexSkipTableReader(t *testing.T) { flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), NodeID: s.NodeID(), } @@ -498,7 +498,7 @@ ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[ flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: 
&execinfra.ServerConfig{Settings: st}, - Txn: client.NewTxn(ctx, tc.Server(0).DB(), nodeID), + Txn: kv.NewTxn(ctx, tc.Server(0).DB(), nodeID), NodeID: nodeID, } spec := execinfrapb.IndexSkipTableReaderSpec{ @@ -623,7 +623,7 @@ func BenchmarkIndexScanTableReader(b *testing.B) { flowCtxTableReader := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), NodeID: s.NodeID(), } @@ -660,7 +660,7 @@ func BenchmarkIndexScanTableReader(b *testing.B) { flowCtxIndexSkipTableReader := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), NodeID: s.NodeID(), } diff --git a/pkg/sql/rowexec/indexbackfiller.go b/pkg/sql/rowexec/indexbackfiller.go index f7b51f3b8c76..40aaa442f0dd 100644 --- a/pkg/sql/rowexec/indexbackfiller.go +++ b/pkg/sql/rowexec/indexbackfiller.go @@ -13,7 +13,7 @@ package rowexec import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" @@ -159,7 +159,7 @@ func (ib *indexBackfiller) runChunk( start := timeutil.Now() var entries []sqlbase.IndexEntry - if err := ib.flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := ib.flowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { txn.SetFixedTimestamp(ctx, readAsOf) // TODO(knz): do KV tracing in DistSQL processors. diff --git a/pkg/sql/rowexec/indexjoiner_test.go b/pkg/sql/rowexec/indexjoiner_test.go index 0845ba8d4d5c..6f6cd77787c4 100644 --- a/pkg/sql/rowexec/indexjoiner_test.go +++ b/pkg/sql/rowexec/indexjoiner_test.go @@ -19,7 +19,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -154,7 +154,7 @@ func TestIndexJoiner(t *testing.T) { Table: *c.desc, IndexIdx: 0, } - txn := client.NewTxn(context.Background(), s.DB(), s.NodeID()) + txn := kv.NewTxn(context.Background(), s.DB(), s.NodeID()) runProcessorTest( t, execinfrapb.ProcessorCoreUnion{JoinReader: &spec}, diff --git a/pkg/sql/rowexec/interleaved_reader_joiner_test.go b/pkg/sql/rowexec/interleaved_reader_joiner_test.go index b39a3571cedb..839999162705 100644 --- a/pkg/sql/rowexec/interleaved_reader_joiner_test.go +++ b/pkg/sql/rowexec/interleaved_reader_joiner_test.go @@ -17,7 +17,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -401,7 +401,7 @@ func TestInterleavedReaderJoiner(t *testing.T) { EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, // Run in a RootTxn so that there's no txn metadata produced. 
- Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), NodeID: s.NodeID(), } @@ -531,7 +531,7 @@ func TestInterleavedReaderJoinerErrors(t *testing.T) { EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, // Run in a RootTxn so that there's no txn metadata produced. - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), NodeID: s.NodeID(), } @@ -582,9 +582,9 @@ func TestInterleavedReaderJoinerTrailingMetadata(t *testing.T) { ctx, sp := tracing.StartSnowballTrace(ctx, tracer, "test flow ctx") defer sp.Finish() - rootTxn := client.NewTxn(ctx, s.DB(), s.NodeID()) + rootTxn := kv.NewTxn(ctx, s.DB(), s.NodeID()) leafInputState := rootTxn.GetLeafTxnInputState(ctx) - leafTxn := client.NewLeafTxn(ctx, s.DB(), s.NodeID(), &leafInputState) + leafTxn := kv.NewLeafTxn(ctx, s.DB(), s.NodeID(), &leafInputState) flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, diff --git a/pkg/sql/rowexec/joinreader_test.go b/pkg/sql/rowexec/joinreader_test.go index 623fd97a746d..7eeef9d93343 100644 --- a/pkg/sql/rowexec/joinreader_test.go +++ b/pkg/sql/rowexec/joinreader_test.go @@ -21,7 +21,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -413,7 +413,7 @@ func TestJoinReader(t *testing.T) { TempStorage: tempEngine, DiskMonitor: &diskMonitor, }, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), } encRows := make(sqlbase.EncDatumRows, len(c.input)) for rowIdx, row := range c.input { @@ -524,7 +524,7 @@ CREATE TABLE test.t (a INT, s STRING, INDEX (a, s))`); err != nil { TempStorage: tempEngine, DiskMonitor: &diskMonitor, }, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), } // Set the memory limit to the minimum allocation size so that the row // container can buffer some rows in memory before spilling to disk. This @@ -618,9 +618,9 @@ func TestJoinReaderDrain(t *testing.T) { diskMonitor.Start(ctx, nil /* pool */, mon.MakeStandaloneBudget(math.MaxInt64)) defer diskMonitor.Stop(ctx) - rootTxn := client.NewTxn(ctx, s.DB(), s.NodeID()) + rootTxn := kv.NewTxn(ctx, s.DB(), s.NodeID()) leafInputState := rootTxn.GetLeafTxnInputState(ctx) - leafTxn := client.NewLeafTxn(ctx, s.DB(), s.NodeID(), &leafInputState) + leafTxn := kv.NewLeafTxn(ctx, s.DB(), s.NodeID(), &leafInputState) flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, @@ -726,7 +726,7 @@ func BenchmarkJoinReader(b *testing.B) { DiskMonitor: diskMonitor, Settings: st, }, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), } const numCols = 2 diff --git a/pkg/sql/rowexec/rowfetcher.go b/pkg/sql/rowexec/rowfetcher.go index 6016fae67785..baad616300fd 100644 --- a/pkg/sql/rowexec/rowfetcher.go +++ b/pkg/sql/rowexec/rowfetcher.go @@ -14,7 +14,7 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/row" @@ -27,11 +27,11 @@ import ( // collector wrapper can be plugged in. 
type rowFetcher interface { StartScan( - _ context.Context, _ *client.Txn, _ roachpb.Spans, limitBatches bool, limitHint int64, traceKV bool, + _ context.Context, _ *kv.Txn, _ roachpb.Spans, limitBatches bool, limitHint int64, traceKV bool, ) error StartInconsistentScan( _ context.Context, - _ *client.DB, + _ *kv.DB, initialTimestamp hlc.Timestamp, maxTimestampAge time.Duration, spans roachpb.Spans, diff --git a/pkg/sql/rowexec/sample_aggregator.go b/pkg/sql/rowexec/sample_aggregator.go index ba11d53ba765..89a0516e0824 100644 --- a/pkg/sql/rowexec/sample_aggregator.go +++ b/pkg/sql/rowexec/sample_aggregator.go @@ -15,8 +15,8 @@ import ( "time" "github.com/axiomhq/hyperloglog" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -314,7 +314,7 @@ func (s *sampleAggregator) writeResults(ctx context.Context) error { // internal executor instead of doing this weird thing where it uses the // internal executor to execute one statement at a time inside a db.Txn() // closure. - if err := s.FlowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.FlowCtx.Cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { for _, si := range s.sketches { distinctCount := int64(si.sketch.Estimate()) var histogram *stats.HistogramData diff --git a/pkg/sql/rowexec/stats.go b/pkg/sql/rowexec/stats.go index b4f0aedbe813..bdff1130007c 100644 --- a/pkg/sql/rowexec/stats.go +++ b/pkg/sql/rowexec/stats.go @@ -15,7 +15,7 @@ import ( "fmt" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/colexec/execerror" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -148,7 +148,7 @@ func (c *rowFetcherStatCollector) NextRow( // StartScan is part of the rowFetcher interface. func (c *rowFetcherStatCollector) StartScan( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, spans roachpb.Spans, limitBatches bool, limitHint int64, @@ -163,7 +163,7 @@ func (c *rowFetcherStatCollector) StartScan( // StartInconsistentScan is part of the rowFetcher interface. 
func (c *rowFetcherStatCollector) StartInconsistentScan( ctx context.Context, - db *client.DB, + db *kv.DB, initialTimestamp hlc.Timestamp, maxTimestampAge time.Duration, spans roachpb.Spans, diff --git a/pkg/sql/rowexec/tablereader_test.go b/pkg/sql/rowexec/tablereader_test.go index 529520e02934..2857cb56ce15 100644 --- a/pkg/sql/rowexec/tablereader_test.go +++ b/pkg/sql/rowexec/tablereader_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -130,7 +130,7 @@ func TestTableReader(t *testing.T) { flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), NodeID: s.NodeID(), } @@ -215,7 +215,7 @@ ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[ flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: st}, - Txn: client.NewTxn(ctx, tc.Server(0).DB(), nodeID), + Txn: kv.NewTxn(ctx, tc.Server(0).DB(), nodeID), NodeID: nodeID, } spec := execinfrapb.TableReaderSpec{ @@ -320,7 +320,7 @@ func TestLimitScans(t *testing.T) { flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, - Txn: client.NewTxn(ctx, kvDB, s.NodeID()), + Txn: kv.NewTxn(ctx, kvDB, s.NodeID()), NodeID: s.NodeID(), } spec := execinfrapb.TableReaderSpec{ @@ -424,7 +424,7 @@ func BenchmarkTableReader(b *testing.B) { flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), NodeID: s.NodeID(), } diff --git a/pkg/sql/rowexec/utils_test.go b/pkg/sql/rowexec/utils_test.go index 4ea8fdde1e20..96d81b7df424 100644 --- a/pkg/sql/rowexec/utils_test.go +++ b/pkg/sql/rowexec/utils_test.go @@ -14,7 +14,7 @@ import ( "context" "testing" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -35,7 +35,7 @@ func runProcessorTest( inputRows sqlbase.EncDatumRows, outputTypes []types.T, expected sqlbase.EncDatumRows, - txn *client.Txn, + txn *kv.Txn, ) { in := distsqlutils.NewRowBuffer(inputTypes, inputRows, distsqlutils.RowBufferArgs{}) out := &distsqlutils.RowBuffer{} diff --git a/pkg/sql/rowexec/zigzagjoiner.go b/pkg/sql/rowexec/zigzagjoiner.go index a93d843fe17f..01ab560959c5 100644 --- a/pkg/sql/rowexec/zigzagjoiner.go +++ b/pkg/sql/rowexec/zigzagjoiner.go @@ -14,7 +14,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -729,7 +729,7 @@ func (z *zigzagJoiner) emitFromContainers() (sqlbase.EncDatumRow, error) { // sides until a match is found then emits the results of the match one result // at a time. 
func (z *zigzagJoiner) nextRow( - ctx context.Context, txn *client.Txn, + ctx context.Context, txn *kv.Txn, ) (sqlbase.EncDatumRow, *execinfrapb.ProducerMetadata) { for { if err := z.cancelChecker.Check(); err != nil { diff --git a/pkg/sql/rowexec/zigzagjoiner_test.go b/pkg/sql/rowexec/zigzagjoiner_test.go index cb5c338e8815..984b2856e2fe 100644 --- a/pkg/sql/rowexec/zigzagjoiner_test.go +++ b/pkg/sql/rowexec/zigzagjoiner_test.go @@ -15,7 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" "github.com/cockroachdb/cockroach/pkg/sql/execinfrapb" @@ -509,7 +509,7 @@ func TestZigzagJoiner(t *testing.T) { flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: st}, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), } out := &distsqlutils.RowBuffer{} @@ -572,7 +572,7 @@ func TestZigzagJoinerDrain(t *testing.T) { flowCtx := execinfra.FlowCtx{ EvalCtx: &evalCtx, Cfg: &execinfra.ServerConfig{Settings: s.ClusterSettings()}, - Txn: client.NewTxn(ctx, s.DB(), s.NodeID()), + Txn: kv.NewTxn(ctx, s.DB(), s.NodeID()), } encRow := make(sqlbase.EncDatumRow, 1) diff --git a/pkg/sql/scatter.go b/pkg/sql/scatter.go index 1bd5ec76a129..f79189245492 100644 --- a/pkg/sql/scatter.go +++ b/pkg/sql/scatter.go @@ -13,8 +13,8 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -131,7 +131,7 @@ func (n *scatterNode) startExec(params runParams) error { RequestHeader: roachpb.RequestHeader{Key: n.run.span.Key, EndKey: n.run.span.EndKey}, RandomizeLeases: true, } - res, pErr := client.SendWrapped(params.ctx, db.NonTransactionalSender(), req) + res, pErr := kv.SendWrapped(params.ctx, db.NonTransactionalSender(), req) if pErr != nil { return pErr.GoError() } diff --git a/pkg/sql/schema/schema.go b/pkg/sql/schema/schema.go index 11711033fdc0..cc1045464e53 100644 --- a/pkg/sql/schema/schema.go +++ b/pkg/sql/schema/schema.go @@ -15,8 +15,8 @@ package schema import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/util/log" @@ -28,7 +28,7 @@ import ( // Instead, we have to rely on a scan of the kv table. // TODO(sqlexec): this should probably be cached. func ResolveNameByID( - ctx context.Context, txn *client.Txn, dbID sqlbase.ID, schemaID sqlbase.ID, + ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, schemaID sqlbase.ID, ) (string, error) { // Fast-path for public schema, to avoid hot lookups. if schemaID == keys.PublicSchemaID { @@ -47,7 +47,7 @@ func ResolveNameByID( // GetForDatabase looks up and returns all available // schema ids to names for a given database. 
func GetForDatabase( - ctx context.Context, txn *client.Txn, dbID sqlbase.ID, + ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, ) (map[sqlbase.ID]string, error) { log.Eventf(ctx, "fetching all schema descriptor IDs for %d", dbID) diff --git a/pkg/sql/schema_accessors.go b/pkg/sql/schema_accessors.go index c3787afcbc4e..b68a81fd2543 100644 --- a/pkg/sql/schema_accessors.go +++ b/pkg/sql/schema_accessors.go @@ -13,7 +13,7 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -75,20 +75,20 @@ type SchemaAccessor interface { // GetDatabaseDesc looks up a database by name and returns its // descriptor. If the database is not found and required is true, // an error is returned; otherwise a nil reference is returned. - GetDatabaseDesc(ctx context.Context, txn *client.Txn, dbName string, flags tree.DatabaseLookupFlags) (*DatabaseDescriptor, error) + GetDatabaseDesc(ctx context.Context, txn *kv.Txn, dbName string, flags tree.DatabaseLookupFlags) (*DatabaseDescriptor, error) // IsValidSchema returns true and the SchemaID if the given schema name is valid for the given database. - IsValidSchema(ctx context.Context, txn *client.Txn, dbID sqlbase.ID, scName string) (bool, sqlbase.ID, error) + IsValidSchema(ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, scName string) (bool, sqlbase.ID, error) // GetObjectNames returns the list of all objects in the given // database and schema. // TODO(solon): when separate schemas are supported, this // API should be extended to use schema descriptors. - GetObjectNames(ctx context.Context, txn *client.Txn, db *DatabaseDescriptor, scName string, flags tree.DatabaseListFlags) (TableNames, error) + GetObjectNames(ctx context.Context, txn *kv.Txn, db *DatabaseDescriptor, scName string, flags tree.DatabaseListFlags) (TableNames, error) // GetObjectDesc looks up an object by name and returns both its // descriptor and that of its parent database. If the object is not // found and flags.required is true, an error is returned, otherwise // a nil reference is returned. - GetObjectDesc(ctx context.Context, txn *client.Txn, settings *cluster.Settings, name *ObjectName, flags tree.ObjectLookupFlags) (ObjectDescriptor, error) + GetObjectDesc(ctx context.Context, txn *kv.Txn, settings *cluster.Settings, name *ObjectName, flags tree.ObjectLookupFlags) (ObjectDescriptor, error) } diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index 66449e5f4faa..ec1c693d7ff7 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -20,10 +20,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -118,7 +118,7 @@ type SchemaChanger struct { tableID sqlbase.ID mutationID sqlbase.MutationID nodeID roachpb.NodeID - db *client.DB + db *kv.DB leaseMgr *LeaseManager // The SchemaChangeManager can attempt to execute this schema // changer after this time. 
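Editorial aside, not part of the patch: every hunk in this file, as in the rest of the change, is a mechanical rename of the internal/client import path to kv; the transaction API itself is untouched. A minimal sketch of the closure pattern these hunks rewrite, assuming a *kv.DB handle obtained elsewhere (the helper name, package clause, and key/value strings are hypothetical):

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
)

// putAndGet runs a small read-after-write transaction through the renamed
// kv package. The closure signature is the one rewritten throughout this diff.
func putAndGet(ctx context.Context, db *kv.DB) (string, error) {
	var s string
	err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		// Hypothetical key and value, used only for illustration.
		if err := txn.Put(ctx, "example-key", "example-value"); err != nil {
			return err
		}
		res, err := txn.Get(ctx, "example-key")
		if err != nil {
			return err
		}
		s = string(res.ValueBytes())
		return nil
	})
	return s, err
}
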
@@ -152,7 +152,7 @@ func NewSchemaChangerForTesting( tableID sqlbase.ID, mutationID sqlbase.MutationID, nodeID roachpb.NodeID, - db client.DB, + db kv.DB, leaseMgr *LeaseManager, jobRegistry *jobs.Registry, execCfg *ExecutorConfig, @@ -272,7 +272,7 @@ func (sc *SchemaChanger) AcquireLease( ctx context.Context, ) (sqlbase.TableDescriptor_SchemaChangeLease, error) { var lease sqlbase.TableDescriptor_SchemaChangeLease - err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -306,7 +306,7 @@ func (sc *SchemaChanger) AcquireLease( } func (sc *SchemaChanger) findTableWithLease( - ctx context.Context, txn *client.Txn, lease sqlbase.TableDescriptor_SchemaChangeLease, + ctx context.Context, txn *kv.Txn, lease sqlbase.TableDescriptor_SchemaChangeLease, ) (*sqlbase.TableDescriptor, error) { tableDesc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) if err != nil { @@ -327,7 +327,7 @@ func (sc *SchemaChanger) findTableWithLease( func (sc *SchemaChanger) ReleaseLease( ctx context.Context, lease sqlbase.TableDescriptor_SchemaChangeLease, ) error { - return sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { tableDesc, err := sc.findTableWithLease(ctx, txn, lease) if err != nil { return err @@ -359,7 +359,7 @@ func (sc *SchemaChanger) ExtendLease( } // Update lease. var lease sqlbase.TableDescriptor_SchemaChangeLease - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { tableDesc, err := sc.findTableWithLease(ctx, txn, *existingLease) if err != nil { return err @@ -394,9 +394,9 @@ func (sc *SchemaChanger) DropTableDesc( zoneKeyPrefix := config.MakeZoneKeyPrefix(uint32(tableDesc.ID)) // Finished deleting all the table data, now delete the table meta data. - return sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Delete table descriptor - b := &client.Batch{} + b := &kv.Batch{} if traceKV { log.VEventf(ctx, 2, "Del %s", descKey) log.VEventf(ctx, 2, "DelRange %s", zoneKeyPrefix) @@ -416,7 +416,7 @@ func (sc *SchemaChanger) DropTableDesc( tableDesc.GetDropJobID(), tableDesc.ID, jobspb.Status_DONE, - func(ctx context.Context, txn *client.Txn, job *jobs.Job) error { + func(ctx context.Context, txn *kv.Txn, job *jobs.Job) error { // Delete the zone config entry for the dropped database associated // with the job, if it exists. details := job.Details().(jobspb.SchemaChangeDetails) @@ -493,7 +493,7 @@ func (sc *SchemaChanger) truncateTable( if tableSpan.EndKey.Less(endKey) { endKey = tableSpan.EndKey } - var b client.Batch + var b kv.Batch b.AddRawRequest(&roachpb.ClearRangeRequest{ RequestHeader: roachpb.RequestHeader{ Key: lastKey.AsRawKey(), @@ -530,7 +530,7 @@ func (sc *SchemaChanger) maybeDropTable( // we still need to wait for the deadline to expire. 
if table.DropTime != 0 { var timeRemaining time.Duration - if err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { timeRemaining = 0 _, zoneCfg, _, err := GetZoneConfigInTxn(ctx, txn, uint32(table.ID), &sqlbase.IndexDescriptor{}, "", false /* getInheritedDefault */) @@ -626,7 +626,7 @@ func (sc *SchemaChanger) maybeBackfillCreateTableAs( g.GoCtx(func(ctx context.Context) error { defer close(maintainLease) - return sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { txn.SetFixedTimestamp(ctx, table.CreateAsOfTime) // Create an internal planner as the planner used to serve the user query @@ -743,7 +743,7 @@ func (sc *SchemaChanger) maybeMakeAddTablePublic( tbl.State = sqlbase.TableDescriptor_PUBLIC return nil }, - func(txn *client.Txn) error { return nil }, + func(txn *kv.Txn) error { return nil }, ); err != nil { return err } @@ -829,7 +829,7 @@ func (sc *SchemaChanger) maybeGCMutations( return nil }, - func(txn *client.Txn) error { + func(txn *kv.Txn) error { job, err := sc.jobRegistry.LoadJobWithTxn(ctx, mutation.JobID, txn) if err != nil { log.Warningf(ctx, "ignoring error during logEvent while GCing mutations: %+v", err) @@ -844,11 +844,11 @@ func (sc *SchemaChanger) maybeGCMutations( func (sc *SchemaChanger) updateDropTableJob( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, jobID int64, tableID sqlbase.ID, status jobspb.Status, - onSuccess func(context.Context, *client.Txn, *jobs.Job) error, + onSuccess func(context.Context, *kv.Txn, *jobs.Job) error, ) error { job, err := sc.jobRegistry.LoadJobWithTxn(ctx, jobID, txn) if err != nil { @@ -880,7 +880,7 @@ func (sc *SchemaChanger) updateDropTableJob( case jobspb.Status_ROCKSDB_COMPACTION: runningStatus = RunningStatusCompaction case jobspb.Status_DONE: - return job.WithTxn(txn).Succeeded(ctx, func(ctx context.Context, txn *client.Txn) error { + return job.WithTxn(txn).Succeeded(ctx, func(ctx context.Context, txn *kv.Txn) error { return onSuccess(ctx, txn, job) }) default: @@ -917,7 +917,7 @@ func (sc *SchemaChanger) drainNames(ctx context.Context) error { return nil }, // Reclaim all the old names. - func(txn *client.Txn) error { + func(txn *kv.Txn) error { b := txn.NewBatch() for _, drain := range namesToReclaim { err := sqlbase.RemoveObjectNamespaceEntry(ctx, txn, drain.ParentID, drain.ParentSchemaID, @@ -930,7 +930,7 @@ func (sc *SchemaChanger) drainNames(ctx context.Context) error { if dropJobID != 0 { if err := sc.updateDropTableJob( ctx, txn, dropJobID, sc.tableID, jobspb.Status_WAIT_FOR_GC_INTERVAL, - func(context.Context, *client.Txn, *jobs.Job) error { + func(context.Context, *kv.Txn, *jobs.Job) error { return nil }); err != nil { return err @@ -1104,7 +1104,7 @@ func (sc *SchemaChanger) exec(ctx context.Context, inSession bool) error { // initialize the job running status. 
func (sc *SchemaChanger) initJobRunningStatus(ctx context.Context) error { - return sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { desc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) if err != nil { return err @@ -1232,7 +1232,7 @@ func (sc *SchemaChanger) RunStateMachineBeforeBackfill(ctx context.Context) erro return errDidntUpdateDescriptor } return nil - }, func(txn *client.Txn) error { + }, func(txn *kv.Txn) error { if sc.job != nil { if err := sc.job.WithTxn(txn).RunningStatus(ctx, func(ctx context.Context, details jobspb.Details) (jobs.RunningStatus, error) { return runStatus, nil @@ -1279,7 +1279,7 @@ func (sc *SchemaChanger) done(ctx context.Context) (*sqlbase.ImmutableTableDescr // We make a call to PublishMultiple to handle the situation to add Foreign Key backreferences. var fksByBackrefTable map[sqlbase.ID][]*sqlbase.ConstraintToUpdate var interleaveParents map[sqlbase.ID]struct{} - err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { fksByBackrefTable = make(map[sqlbase.ID][]*sqlbase.ConstraintToUpdate) interleaveParents = make(map[sqlbase.ID]struct{}) @@ -1466,7 +1466,7 @@ func (sc *SchemaChanger) done(ctx context.Context) (*sqlbase.ImmutableTableDescr return nil } - descs, err := sc.leaseMgr.PublishMultiple(ctx, tableIDsToUpdate, update, func(txn *client.Txn) error { + descs, err := sc.leaseMgr.PublishMultiple(ctx, tableIDsToUpdate, update, func(txn *kv.Txn) error { // If the job already has a terminal status, we shouldn't need to update // its status again. One way this may happen is when a table is dropped // all jobs that mutate that table are marked successful. So if is a job @@ -1521,7 +1521,7 @@ func (sc *SchemaChanger) notFirstInLine( ) (*sqlbase.TableDescriptor, bool, error) { var notFirst bool var desc *sqlbase.TableDescriptor - err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { notFirst = false var err error desc, err = sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) @@ -1579,7 +1579,7 @@ func (sc *SchemaChanger) reverseMutations(ctx context.Context, causingError erro // Get the other tables whose foreign key backreferences need to be removed. var fksByBackrefTable map[sqlbase.ID][]*sqlbase.ConstraintToUpdate - err := sc.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { fksByBackrefTable = make(map[sqlbase.ID][]*sqlbase.ConstraintToUpdate) var err error desc, err := sqlbase.GetTableDescFromID(ctx, txn, sc.tableID) @@ -1679,7 +1679,7 @@ func (sc *SchemaChanger) reverseMutations(ctx context.Context, causingError erro return nil } - _, err = sc.leaseMgr.PublishMultiple(ctx, tableIDsToUpdate, update, func(txn *client.Txn) error { + _, err = sc.leaseMgr.PublishMultiple(ctx, tableIDsToUpdate, update, func(txn *kv.Txn) error { // Read the table descriptor from the store. The Version of the // descriptor has already been incremented in the transaction and // this descriptor can be modified without incrementing the version. @@ -1742,7 +1742,7 @@ func (sc *SchemaChanger) reverseMutations(ctx context.Context, causingError erro // Mark the job associated with the mutation as failed. 
func markJobFailed( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tableDesc *sqlbase.TableDescriptor, mutationID sqlbase.MutationID, jobRegistry *jobs.Registry, @@ -1764,7 +1764,7 @@ func markJobFailed( // Mark the current schema change job as failed and create a new rollback job // representing the schema change and return it. func (sc *SchemaChanger) createRollbackJob( - ctx context.Context, txn *client.Txn, tableDesc *sqlbase.TableDescriptor, causingError error, + ctx context.Context, txn *kv.Txn, tableDesc *sqlbase.TableDescriptor, causingError error, ) (*jobs.Job, error) { // Mark job as failed. @@ -2063,7 +2063,7 @@ func NewSchemaChangeManager( ambientCtx log.AmbientContext, execCfg *ExecutorConfig, testingKnobs *SchemaChangerTestingKnobs, - db client.DB, + db kv.DB, nodeDesc roachpb.NodeDescriptor, dsp *DistSQLPlanner, ieFactory sqlutil.SessionBoundInternalExecutorFactory, diff --git a/pkg/sql/schema_changer_test.go b/pkg/sql/schema_changer_test.go index ed202b859757..480d4230ca8d 100644 --- a/pkg/sql/schema_changer_test.go +++ b/pkg/sql/schema_changer_test.go @@ -24,10 +24,10 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" @@ -408,7 +408,7 @@ CREATE INDEX foo ON t.test (v) } } -func getTableKeyCount(ctx context.Context, kvDB *client.DB) (int, error) { +func getTableKeyCount(ctx context.Context, kvDB *kv.DB) (int, error) { tableDesc := sqlbase.GetTableDescriptor(kvDB, "t", "test") tablePrefix := roachpb.Key(keys.MakeTablePrefix(uint32(tableDesc.ID))) tableEnd := tablePrefix.PrefixEnd() @@ -416,7 +416,7 @@ func getTableKeyCount(ctx context.Context, kvDB *client.DB) (int, error) { return len(kvs), err } -func checkTableKeyCountExact(ctx context.Context, kvDB *client.DB, e int) error { +func checkTableKeyCountExact(ctx context.Context, kvDB *kv.DB, e int) error { if count, err := getTableKeyCount(ctx, kvDB); err != nil { return err } else if count != e { @@ -427,7 +427,7 @@ func checkTableKeyCountExact(ctx context.Context, kvDB *client.DB, e int) error // checkTableKeyCount returns the number of KVs in the DB, the multiple should be the // number of columns. -func checkTableKeyCount(ctx context.Context, kvDB *client.DB, multiple int, maxValue int) error { +func checkTableKeyCount(ctx context.Context, kvDB *kv.DB, multiple int, maxValue int) error { return checkTableKeyCountExact(ctx, kvDB, multiple*(maxValue+1)) } @@ -436,7 +436,7 @@ func checkTableKeyCount(ctx context.Context, kvDB *client.DB, multiple int, maxV func runSchemaChangeWithOperations( t *testing.T, sqlDB *gosql.DB, - kvDB *client.DB, + kvDB *kv.DB, jobRegistry *jobs.Registry, schemaChange string, maxValue int, @@ -1145,7 +1145,7 @@ COMMIT; // Add an index and check that it succeeds. 
func addIndexSchemaChange( - t *testing.T, sqlDB *gosql.DB, kvDB *client.DB, maxValue int, numKeysPerRow int, + t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, maxValue int, numKeysPerRow int, ) { if _, err := sqlDB.Exec("CREATE UNIQUE INDEX foo ON t.test (v)"); err != nil { t.Fatal(err) @@ -1186,7 +1186,7 @@ func addIndexSchemaChange( // Add a column with a check constraint and check that it succeeds. func addColumnSchemaChange( - t *testing.T, sqlDB *gosql.DB, kvDB *client.DB, maxValue int, numKeysPerRow int, + t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, maxValue int, numKeysPerRow int, ) { if _, err := sqlDB.Exec("ALTER TABLE t.test ADD COLUMN x DECIMAL DEFAULT (DECIMAL '1.4') CHECK (x >= 0)"); err != nil { t.Fatal(err) @@ -1223,7 +1223,7 @@ func addColumnSchemaChange( // Drop a column and check that it succeeds. func dropColumnSchemaChange( - t *testing.T, sqlDB *gosql.DB, kvDB *client.DB, maxValue int, numKeysPerRow int, + t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, maxValue int, numKeysPerRow int, ) { if _, err := sqlDB.Exec("ALTER TABLE t.test DROP x"); err != nil { t.Fatal(err) @@ -1239,7 +1239,7 @@ func dropColumnSchemaChange( // Drop an index and check that it succeeds. func dropIndexSchemaChange( - t *testing.T, sqlDB *gosql.DB, kvDB *client.DB, maxValue int, numKeysPerRow int, + t *testing.T, sqlDB *gosql.DB, kvDB *kv.DB, maxValue int, numKeysPerRow int, ) { if _, err := sqlDB.Exec("DROP INDEX t.test@foo CASCADE"); err != nil { t.Fatal(err) @@ -3749,7 +3749,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL DEFAULT (DECIMAL '3.14 // Check that the table descriptor exists so we know the data will // eventually be deleted. var droppedDesc *sqlbase.TableDescriptor - if err := kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error droppedDesc, err = sqlbase.GetTableDescFromID(ctx, txn, tableDesc.ID) return err @@ -3866,7 +3866,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT, pi DECIMAL REFERENCES t.pi (d) DE // Wait until the older descriptor has been deleted. 
testutils.SucceedsSoon(t, func() error { - if err := kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var err error _, err = sqlbase.GetTableDescFromID(ctx, txn, tableDesc.ID) return err @@ -4090,7 +4090,7 @@ func TestIndexBackfillAfterGC(t *testing.T) { RequestHeader: roachpb.RequestHeaderFromSpan(sp), Threshold: tc.Server(0).Clock().Now(), } - _, err := client.SendWrapped(ctx, tc.Server(0).DistSenderI().(*kvcoord.DistSender), &gcr) + _, err := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(*kvcoord.DistSender), &gcr) if err != nil { panic(err) } @@ -4757,7 +4757,7 @@ func TestIndexBackfillValidation(t *testing.T) { params, _ := tests.CreateTestServerParams() const maxValue = 1000 backfillCount := int64(0) - var db *client.DB + var db *kv.DB var tableDesc *sqlbase.TableDescriptor params.Knobs = base.TestingKnobs{ SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{ @@ -4827,7 +4827,7 @@ func TestInvertedIndexBackfillValidation(t *testing.T) { params, _ := tests.CreateTestServerParams() const maxValue = 1000 backfillCount := int64(0) - var db *client.DB + var db *kv.DB var tableDesc *sqlbase.TableDescriptor params.Knobs = base.TestingKnobs{ SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{ diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go index dfafcc755ac1..0b852662d2dd 100644 --- a/pkg/sql/sem/builtins/builtins.go +++ b/pkg/sql/sem/builtins/builtins.go @@ -33,7 +33,7 @@ import ( "github.com/cockroachdb/apd" "github.com/cockroachdb/cockroach/pkg/build" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -3433,7 +3433,7 @@ may increase either contention or retry errors, or both.`, ReturnType: tree.FixedReturnType(types.Int), Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) { key := []byte(tree.MustBeDBytes(args[0])) - b := &client.Batch{} + b := &kv.Batch{} b.AddRawRequest(&roachpb.LeaseInfoRequest{ RequestHeader: roachpb.RequestHeader{ Key: key, @@ -3500,7 +3500,7 @@ may increase either contention or retry errors, or both.`, ReturnType: tree.FixedReturnType(types.Jsonb), Fn: func(ctx *tree.EvalContext, args tree.Datums) (tree.Datum, error) { key := []byte(tree.MustBeDBytes(args[0])) - b := &client.Batch{} + b := &kv.Batch{} b.AddRawRequest(&roachpb.RangeStatsRequest{ RequestHeader: roachpb.RequestHeader{ Key: key, diff --git a/pkg/sql/sem/builtins/generator_builtins.go b/pkg/sql/sem/builtins/generator_builtins.go index a1910b7c54eb..ed4165828928 100644 --- a/pkg/sql/sem/builtins/generator_builtins.go +++ b/pkg/sql/sem/builtins/generator_builtins.go @@ -16,8 +16,8 @@ import ( "fmt" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/lex" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -80,11 +80,11 @@ var aclexplodeGeneratorType = types.MakeLabeledTuple( // aclExplodeGenerator supports the execution of aclexplode. 
type aclexplodeGenerator struct{} -func (aclexplodeGenerator) ResolvedType() *types.T { return aclexplodeGeneratorType } -func (aclexplodeGenerator) Start(_ context.Context, _ *client.Txn) error { return nil } -func (aclexplodeGenerator) Close() {} -func (aclexplodeGenerator) Next(_ context.Context) (bool, error) { return false, nil } -func (aclexplodeGenerator) Values() tree.Datums { return nil } +func (aclexplodeGenerator) ResolvedType() *types.T { return aclexplodeGeneratorType } +func (aclexplodeGenerator) Start(_ context.Context, _ *kv.Txn) error { return nil } +func (aclexplodeGenerator) Close() {} +func (aclexplodeGenerator) Next(_ context.Context) (bool, error) { return false, nil } +func (aclexplodeGenerator) Values() tree.Datums { return nil } // generators is a map from name to slice of Builtins for all built-in // generators. @@ -321,7 +321,7 @@ func (*keywordsValueGenerator) ResolvedType() *types.T { return keywordsValueGen func (*keywordsValueGenerator) Close() {} // Start implements the tree.ValueGenerator interface. -func (k *keywordsValueGenerator) Start(_ context.Context, _ *client.Txn) error { +func (k *keywordsValueGenerator) Start(_ context.Context, _ *kv.Txn) error { k.curKeyword = -1 return nil } @@ -456,7 +456,7 @@ func (s *seriesValueGenerator) ResolvedType() *types.T { } // Start implements the tree.ValueGenerator interface. -func (s *seriesValueGenerator) Start(_ context.Context, _ *client.Txn) error { +func (s *seriesValueGenerator) Start(_ context.Context, _ *kv.Txn) error { s.nextOK = true s.start = s.origStart s.value = s.origStart @@ -509,7 +509,7 @@ func (s *multipleArrayValueGenerator) ResolvedType() *types.T { } // Start implements the tree.ValueGenerator interface. -func (s *multipleArrayValueGenerator) Start(_ context.Context, _ *client.Txn) error { +func (s *multipleArrayValueGenerator) Start(_ context.Context, _ *kv.Txn) error { s.datums = make(tree.Datums, len(s.arrays)) s.nextIndex = -1 return nil @@ -559,7 +559,7 @@ func (s *arrayValueGenerator) ResolvedType() *types.T { } // Start implements the tree.ValueGenerator interface. -func (s *arrayValueGenerator) Start(_ context.Context, _ *client.Txn) error { +func (s *arrayValueGenerator) Start(_ context.Context, _ *kv.Txn) error { s.nextIndex = -1 return nil } @@ -608,7 +608,7 @@ func (s *expandArrayValueGenerator) ResolvedType() *types.T { } // Start implements the tree.ValueGenerator interface. -func (s *expandArrayValueGenerator) Start(_ context.Context, _ *client.Txn) error { +func (s *expandArrayValueGenerator) Start(_ context.Context, _ *kv.Txn) error { s.avg.nextIndex = -1 return nil } @@ -675,7 +675,7 @@ func (s *subscriptsValueGenerator) ResolvedType() *types.T { } // Start implements the tree.ValueGenerator interface. -func (s *subscriptsValueGenerator) Start(_ context.Context, _ *client.Txn) error { +func (s *subscriptsValueGenerator) Start(_ context.Context, _ *kv.Txn) error { if s.reverse { s.avg.nextIndex = s.avg.array.Len() } else { @@ -726,7 +726,7 @@ func makeUnaryGenerator(_ *tree.EvalContext, args tree.Datums) (tree.ValueGenera func (*unaryValueGenerator) ResolvedType() *types.T { return unaryValueGeneratorType } // Start implements the tree.ValueGenerator interface. -func (s *unaryValueGenerator) Start(_ context.Context, _ *client.Txn) error { +func (s *unaryValueGenerator) Start(_ context.Context, _ *kv.Txn) error { s.done = false return nil } @@ -827,7 +827,7 @@ func (g *jsonArrayGenerator) ResolvedType() *types.T { } // Start implements the tree.ValueGenerator interface. 
-func (g *jsonArrayGenerator) Start(_ context.Context, _ *client.Txn) error { +func (g *jsonArrayGenerator) Start(_ context.Context, _ *kv.Txn) error { g.nextIndex = -1 g.json.JSON = g.json.JSON.MaybeDecode() g.buf[0] = nil @@ -900,7 +900,7 @@ func (g *jsonObjectKeysGenerator) ResolvedType() *types.T { } // Start implements the tree.ValueGenerator interface. -func (g *jsonObjectKeysGenerator) Start(_ context.Context, _ *client.Txn) error { return nil } +func (g *jsonObjectKeysGenerator) Start(_ context.Context, _ *kv.Txn) error { return nil } // Close implements the tree.ValueGenerator interface. func (g *jsonObjectKeysGenerator) Close() {} @@ -979,7 +979,7 @@ func (g *jsonEachGenerator) ResolvedType() *types.T { } // Start implements the tree.ValueGenerator interface. -func (g *jsonEachGenerator) Start(_ context.Context, _ *client.Txn) error { +func (g *jsonEachGenerator) Start(_ context.Context, _ *kv.Txn) error { iter, err := g.target.ObjectIter() if err != nil { return err @@ -1023,7 +1023,7 @@ func (g *jsonEachGenerator) Values() tree.Datums { type checkConsistencyGenerator struct { ctx context.Context - db *client.DB + db *kv.DB from, to roachpb.Key mode roachpb.ChecksumMode // remainingRows is populated by Start(). Each Next() call peels of the first @@ -1082,8 +1082,8 @@ func (*checkConsistencyGenerator) ResolvedType() *types.T { } // Start is part of the tree.ValueGenerator interface. -func (c *checkConsistencyGenerator) Start(_ context.Context, _ *client.Txn) error { - var b client.Batch +func (c *checkConsistencyGenerator) Start(_ context.Context, _ *kv.Txn) error { + var b kv.Batch b.AddRawRequest(&roachpb.CheckConsistencyRequest{ RequestHeader: roachpb.RequestHeader{ Key: c.from, diff --git a/pkg/sql/sem/builtins/generator_builtins_test.go b/pkg/sql/sem/builtins/generator_builtins_test.go index 89a9bd87c841..d824e9fdd1f9 100644 --- a/pkg/sql/sem/builtins/generator_builtins_test.go +++ b/pkg/sql/sem/builtins/generator_builtins_test.go @@ -15,7 +15,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" @@ -30,7 +30,7 @@ func TestConcurrentProcessorsReadEpoch(t *testing.T) { SQLEvalContext: &tree.EvalContextTestingKnobs{ CallbackGenerators: map[string]*tree.CallbackValueGenerator{ "my_callback": tree.NewCallbackValueGenerator( - func(ctx context.Context, prev int, _ *client.Txn) (int, error) { + func(ctx context.Context, prev int, _ *kv.Txn) (int, error) { if prev < 10 { return prev + 1, nil } diff --git a/pkg/sql/sem/tree/eval.go b/pkg/sql/sem/tree/eval.go index a6753eae0d5b..87cb76ee6f48 100644 --- a/pkg/sql/sem/tree/eval.go +++ b/pkg/sql/sem/tree/eval.go @@ -23,7 +23,7 @@ import ( "github.com/cockroachdb/apd" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -2645,13 +2645,13 @@ type EvalSessionAccessor interface { type InternalExecutor interface { // Query is part of the sqlutil.InternalExecutor interface. 
Query( - ctx context.Context, opName string, txn *client.Txn, + ctx context.Context, opName string, txn *kv.Txn, stmt string, qargs ...interface{}, ) ([]Datums, error) // QueryRow is part of the sqlutil.InternalExecutor interface. QueryRow( - ctx context.Context, opName string, txn *client.Txn, stmt string, qargs ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, stmt string, qargs ...interface{}, ) (Datums, error) } @@ -2808,9 +2808,9 @@ type EvalContext struct { Sequence SequenceOperators // The transaction in which the statement is executing. - Txn *client.Txn + Txn *kv.Txn // A handle to the database. - DB *client.DB + DB *kv.DB ReCache *RegexpCache tmpDec apd.Decimal @@ -2850,7 +2850,7 @@ func MakeTestingEvalContext(st *cluster.Settings) EvalContext { // EvalContext so do not start or close the memory monitor. func MakeTestingEvalContextWithMon(st *cluster.Settings, monitor *mon.BytesMonitor) EvalContext { ctx := EvalContext{ - Txn: &client.Txn{}, + Txn: &kv.Txn{}, SessionData: &sessiondata.SessionData{}, Settings: st, } @@ -5310,16 +5310,16 @@ type CallbackValueGenerator struct { // as prev initially, and the value it previously returned for subsequent // invocations. Once it returns -1 or an error, it will not be invoked any // more. - cb func(ctx context.Context, prev int, txn *client.Txn) (int, error) + cb func(ctx context.Context, prev int, txn *kv.Txn) (int, error) val int - txn *client.Txn + txn *kv.Txn } var _ ValueGenerator = &CallbackValueGenerator{} // NewCallbackValueGenerator creates a new CallbackValueGenerator. func NewCallbackValueGenerator( - cb func(ctx context.Context, prev int, txn *client.Txn) (int, error), + cb func(ctx context.Context, prev int, txn *kv.Txn) (int, error), ) *CallbackValueGenerator { return &CallbackValueGenerator{ cb: cb, @@ -5332,7 +5332,7 @@ func (c *CallbackValueGenerator) ResolvedType() *types.T { } // Start is part of the ValueGenerator interface. -func (c *CallbackValueGenerator) Start(_ context.Context, txn *client.Txn) error { +func (c *CallbackValueGenerator) Start(_ context.Context, txn *kv.Txn) error { c.txn = txn return nil } diff --git a/pkg/sql/sem/tree/generators.go b/pkg/sql/sem/tree/generators.go index 1bb31beb592c..733f6f1cb2fc 100644 --- a/pkg/sql/sem/tree/generators.go +++ b/pkg/sql/sem/tree/generators.go @@ -13,7 +13,7 @@ package tree import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/types" ) @@ -48,7 +48,7 @@ type ValueGenerator interface { // // txn represents the txn that the generator will run inside of. The generator // is expected to hold on to this txn and use it in Next() calls. - Start(ctx context.Context, txn *client.Txn) error + Start(ctx context.Context, txn *kv.Txn) error // Next determines whether there is a row of data available. 
Next(context.Context) (bool, error) diff --git a/pkg/sql/sem/tree/timeconv_test.go b/pkg/sql/sem/tree/timeconv_test.go index 37814ecb4086..afb9b92b5151 100644 --- a/pkg/sql/sem/tree/timeconv_test.go +++ b/pkg/sql/sem/tree/timeconv_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" _ "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -41,12 +41,12 @@ func TestClusterTimestampConversion(t *testing.T) { } clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) - senderFactory := client.MakeMockTxnSenderFactory( + senderFactory := kv.MakeMockTxnSenderFactory( func(context.Context, *roachpb.Transaction, roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { panic("unused") }) - db := client.NewDB( + db := kv.NewDB( testutils.MakeAmbientCtx(), senderFactory, clock) @@ -62,12 +62,12 @@ func TestClusterTimestampConversion(t *testing.T) { ) ctx := tree.EvalContext{ - Txn: client.NewTxnFromProto( + Txn: kv.NewTxnFromProto( context.Background(), db, 1, /* gatewayNodeID */ ts, - client.RootTxn, + kv.RootTxn, &txnProto, ), } diff --git a/pkg/sql/sequence.go b/pkg/sql/sequence.go index 86f10d258579..2875a1f1df6f 100644 --- a/pkg/sql/sequence.go +++ b/pkg/sql/sequence.go @@ -14,8 +14,8 @@ import ( "context" "math" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/parser" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -50,7 +50,7 @@ func (p *planner) IncrementSequence(ctx context.Context, seqName *tree.TableName val = int64(rowid) } else { seqValueKey := keys.MakeSequenceKey(uint32(descriptor.ID)) - val, err = client.IncrementValRetryable( + val, err = kv.IncrementValRetryable( ctx, p.txn.DB(), seqValueKey, seqOpts.Increment) if err != nil { switch err.(type) { diff --git a/pkg/sql/set_cluster_setting.go b/pkg/sql/set_cluster_setting.go index ff6a189ddffb..94e4dbe483cd 100644 --- a/pkg/sql/set_cluster_setting.go +++ b/pkg/sql/set_cluster_setting.go @@ -16,7 +16,7 @@ import ( "strings" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings" @@ -110,7 +110,7 @@ func (n *setClusterSettingNode) startExec(params runParams) error { execCfg := params.extendedEvalCtx.ExecCfg var expectedEncodedValue string - if err := execCfg.DB.Txn(params.ctx, func(ctx context.Context, txn *client.Txn) error { + if err := execCfg.DB.Txn(params.ctx, func(ctx context.Context, txn *kv.Txn) error { var reportedValue string if n.value == nil { reportedValue = "DEFAULT" diff --git a/pkg/sql/set_zone_config.go b/pkg/sql/set_zone_config.go index f725d6e97b60..793ba8890a45 100644 --- a/pkg/sql/set_zone_config.go +++ b/pkg/sql/set_zone_config.go @@ -18,8 +18,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server/serverpb" 
"github.com/cockroachdb/cockroach/pkg/server/telemetry" @@ -825,7 +825,7 @@ func validateZoneAttrsAndLocalities( func writeZoneConfig( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, targetID sqlbase.ID, table *sqlbase.TableDescriptor, zone *zonepb.ZoneConfig, @@ -861,9 +861,7 @@ func writeZoneConfig( // getZoneConfigRaw looks up the zone config with the given ID. Unlike // getZoneConfig, it does not attempt to ascend the zone config hierarchy. If no // zone config exists for the given ID, it returns nil. -func getZoneConfigRaw( - ctx context.Context, txn *client.Txn, id sqlbase.ID, -) (*zonepb.ZoneConfig, error) { +func getZoneConfigRaw(ctx context.Context, txn *kv.Txn, id sqlbase.ID) (*zonepb.ZoneConfig, error) { kv, err := txn.Get(ctx, config.MakeZoneKey(uint32(id))) if err != nil { return nil, err @@ -886,7 +884,7 @@ func getZoneConfigRaw( // reuse an existing client.Txn safely. func removeIndexZoneConfigs( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, execCfg *ExecutorConfig, tableID sqlbase.ID, indexDescs []sqlbase.IndexDescriptor, diff --git a/pkg/sql/show_cluster_setting.go b/pkg/sql/show_cluster_setting.go index 80f190749046..04a8150f8fe4 100644 --- a/pkg/sql/show_cluster_setting.go +++ b/pkg/sql/show_cluster_setting.go @@ -18,7 +18,7 @@ import ( "strings" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -48,7 +48,7 @@ func (p *planner) showStateMachineSetting( // The (slight ab)use of WithMaxAttempts achieves convenient context cancellation. return retry.WithMaxAttempts(ctx, retry.Options{}, math.MaxInt32, func() error { - return p.execCfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return p.execCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { datums, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryRowEx( ctx, "read-setting", txn, diff --git a/pkg/sql/show_ranges.go b/pkg/sql/show_ranges.go index 81b061521fd8..f432f8d797e3 100644 --- a/pkg/sql/show_ranges.go +++ b/pkg/sql/show_ranges.go @@ -13,15 +13,13 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" ) // ScanMetaKVs returns the meta KVs for the ranges that touch the given span. -func ScanMetaKVs( - ctx context.Context, txn *client.Txn, span roachpb.Span, -) ([]client.KeyValue, error) { +func ScanMetaKVs(ctx context.Context, txn *kv.Txn, span roachpb.Span) ([]kv.KeyValue, error) { metaStart := keys.RangeMetaKey(keys.MustAddr(span.Key).Next()) metaEnd := keys.RangeMetaKey(keys.MustAddr(span.EndKey)) diff --git a/pkg/sql/sqlbase/index_encoding.go b/pkg/sql/sqlbase/index_encoding.go index 2897fc5c0e15..e3fa02bbcb80 100644 --- a/pkg/sql/sqlbase/index_encoding.go +++ b/pkg/sql/sqlbase/index_encoding.go @@ -14,8 +14,8 @@ import ( "fmt" "sort" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -715,7 +715,7 @@ func DecodeKeyVals( // // Don't use this function in the scan "hot path". 
func ExtractIndexKey( - a *DatumAlloc, tableDesc *TableDescriptor, entry client.KeyValue, + a *DatumAlloc, tableDesc *TableDescriptor, entry kv.KeyValue, ) (roachpb.Key, error) { indexID, key, err := DecodeIndexKeyPrefix(tableDesc, entry.Key) if err != nil { diff --git a/pkg/sql/sqlbase/namespace.go b/pkg/sql/sqlbase/namespace.go index 10ee8038ebcb..2fb074cfacdb 100644 --- a/pkg/sql/sqlbase/namespace.go +++ b/pkg/sql/sqlbase/namespace.go @@ -14,8 +14,8 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/util/log" ) @@ -59,7 +59,7 @@ import ( // RemoveObjectNamespaceEntry removes entries from both the deprecated and // new system.namespace table (if one exists). func RemoveObjectNamespaceEntry( - ctx context.Context, txn *client.Txn, parentID ID, parentSchemaID ID, name string, KVTrace bool, + ctx context.Context, txn *kv.Txn, parentID ID, parentSchemaID ID, name string, KVTrace bool, ) error { b := txn.NewBatch() var toDelete []DescriptorKey @@ -89,23 +89,21 @@ func RemoveObjectNamespaceEntry( // RemovePublicTableNamespaceEntry is a wrapper around RemoveObjectNamespaceEntry // for public tables. func RemovePublicTableNamespaceEntry( - ctx context.Context, txn *client.Txn, parentID ID, name string, + ctx context.Context, txn *kv.Txn, parentID ID, name string, ) error { return RemoveObjectNamespaceEntry(ctx, txn, parentID, keys.PublicSchemaID, name, false /* KVTrace */) } // RemoveSchemaNamespaceEntry is a wrapper around RemoveObjectNamespaceEntry // for schemas. -func RemoveSchemaNamespaceEntry( - ctx context.Context, txn *client.Txn, parentID ID, name string, -) error { +func RemoveSchemaNamespaceEntry(ctx context.Context, txn *kv.Txn, parentID ID, name string) error { return RemoveObjectNamespaceEntry(ctx, txn, parentID, keys.RootNamespaceID, name, false /* KVTrace */) } // RemoveDatabaseNamespaceEntry is a wrapper around RemoveObjectNamespaceEntry // for databases. func RemoveDatabaseNamespaceEntry( - ctx context.Context, txn *client.Txn, name string, KVTrace bool, + ctx context.Context, txn *kv.Txn, name string, KVTrace bool, ) error { return RemoveObjectNamespaceEntry(ctx, txn, keys.RootNamespaceID, keys.RootNamespaceID, name, KVTrace) } @@ -151,7 +149,7 @@ func MakeDatabaseNameKey( // (parentID, parentSchemaID, name) supplied. If cluster version < 20.1, // the parentSchemaID is ignored. func LookupObjectID( - ctx context.Context, txn *client.Txn, parentID ID, parentSchemaID ID, name string, + ctx context.Context, txn *kv.Txn, parentID ID, parentSchemaID ID, name string, ) (bool, ID, error) { var key DescriptorKey if parentID == keys.RootNamespaceID { @@ -205,12 +203,12 @@ func LookupObjectID( // LookupPublicTableID is a wrapper around LookupObjectID for public tables. func LookupPublicTableID( - ctx context.Context, txn *client.Txn, parentID ID, name string, + ctx context.Context, txn *kv.Txn, parentID ID, name string, ) (bool, ID, error) { return LookupObjectID(ctx, txn, parentID, keys.PublicSchemaID, name) } // LookupDatabaseID is a wrapper around LookupObjectID for databases. 
-func LookupDatabaseID(ctx context.Context, txn *client.Txn, name string) (bool, ID, error) { +func LookupDatabaseID(ctx context.Context, txn *kv.Txn, name string) (bool, ID, error) { return LookupObjectID(ctx, txn, keys.RootNamespaceID, keys.RootNamespaceID, name) } diff --git a/pkg/sql/sqlbase/structured.go b/pkg/sql/sqlbase/structured.go index d98a534b24a8..4d86a787de16 100644 --- a/pkg/sql/sqlbase/structured.go +++ b/pkg/sql/sqlbase/structured.go @@ -18,8 +18,8 @@ import ( "strings" "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" @@ -1531,7 +1531,7 @@ func (desc *MutableTableDescriptor) allocateColumnFamilyIDs(columnNames map[stri // MaybeIncrementVersion increments the version of a descriptor if necessary. func (desc *MutableTableDescriptor) MaybeIncrementVersion( - ctx context.Context, txn *client.Txn, settings *cluster.Settings, + ctx context.Context, txn *kv.Txn, settings *cluster.Settings, ) error { // Already incremented, no-op. if desc.Version == desc.ClusterVersion.Version+1 { @@ -1558,7 +1558,7 @@ func (desc *MutableTableDescriptor) MaybeIncrementVersion( // Validate validates that the table descriptor is well formed. Checks include // both single table and cross table invariants. -func (desc *TableDescriptor) Validate(ctx context.Context, txn *client.Txn) error { +func (desc *TableDescriptor) Validate(ctx context.Context, txn *kv.Txn) error { err := desc.ValidateTable() if err != nil { return err @@ -1571,7 +1571,7 @@ func (desc *TableDescriptor) Validate(ctx context.Context, txn *client.Txn) erro // validateCrossReferences validates that each reference to another table is // resolvable and that the necessary back references exist. -func (desc *TableDescriptor) validateCrossReferences(ctx context.Context, txn *client.Txn) error { +func (desc *TableDescriptor) validateCrossReferences(ctx context.Context, txn *kv.Txn) error { // Check that parent DB exists.
{ res, err := txn.Get(ctx, MakeDescMetadataKey(desc.ParentID)) diff --git a/pkg/sql/sqlbase/structured_test.go b/pkg/sql/sqlbase/structured_test.go index b9cb33cc6b87..2c7044fed937 100644 --- a/pkg/sql/sqlbase/structured_test.go +++ b/pkg/sql/sqlbase/structured_test.go @@ -18,8 +18,8 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -816,7 +816,7 @@ func TestValidateCrossTableReferences(t *testing.T) { t.Fatal(err) } } - txn := client.NewTxn(ctx, kvDB, s.NodeID()) + txn := kv.NewTxn(ctx, kvDB, s.NodeID()) if err := test.desc.validateCrossReferences(ctx, txn); err == nil { t.Errorf("%d: expected \"%s\", but found success: %+v", i, test.err, test.desc) } else if test.err != err.Error() && "internal error: "+test.err != err.Error() { diff --git a/pkg/sql/sqlbase/table.go b/pkg/sql/sqlbase/table.go index 964392c6ca6b..93fa889c5615 100644 --- a/pkg/sql/sqlbase/table.go +++ b/pkg/sql/sqlbase/table.go @@ -16,7 +16,7 @@ import ( "sort" "strings" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -310,7 +310,7 @@ type tableLookupFn func(ID) (*TableDescriptor, error) // GetConstraintInfo returns a summary of all constraints on the table. func (desc *TableDescriptor) GetConstraintInfo( - ctx context.Context, txn *client.Txn, + ctx context.Context, txn *kv.Txn, ) (map[string]ConstraintDetail, error) { var tableLookup tableLookupFn if txn != nil { @@ -514,7 +514,7 @@ func FindFKOriginIndex( // because the marshaling is not guaranteed to be stable and also because it's // sensitive to things like missing vs default values of fields. 
func ConditionalGetTableDescFromTxn( - ctx context.Context, txn *client.Txn, expectation *TableDescriptor, + ctx context.Context, txn *kv.Txn, expectation *TableDescriptor, ) (*roachpb.Value, error) { key := MakeDescMetadataKey(expectation.ID) existingKV, err := txn.Get(ctx, key) diff --git a/pkg/sql/sqlbase/table_test.go b/pkg/sql/sqlbase/table_test.go index 9ebc61aaebe5..7ebd1f406365 100644 --- a/pkg/sql/sqlbase/table_test.go +++ b/pkg/sql/sqlbase/table_test.go @@ -21,7 +21,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -221,7 +221,7 @@ func TestIndexKey(t *testing.T) { t.Fatal(err) } primaryValue := roachpb.MakeValueFromBytes(nil) - primaryIndexKV := client.KeyValue{Key: primaryKey, Value: &primaryValue} + primaryIndexKV := kv.KeyValue{Key: primaryKey, Value: &primaryValue} secondaryIndexEntry, err := EncodeSecondaryIndex( &tableDesc, &tableDesc.Indexes[0], colMap, testValues, true /* includeEmpty */) @@ -231,12 +231,12 @@ func TestIndexKey(t *testing.T) { if err != nil { t.Fatal(err) } - secondaryIndexKV := client.KeyValue{ + secondaryIndexKV := kv.KeyValue{ Key: secondaryIndexEntry[0].Key, Value: &secondaryIndexEntry[0].Value, } - checkEntry := func(index *IndexDescriptor, entry client.KeyValue) { + checkEntry := func(index *IndexDescriptor, entry kv.KeyValue) { values, err := decodeIndex(&tableDesc, index, entry.Key) if err != nil { t.Fatal(err) diff --git a/pkg/sql/sqlbase/testutils.go b/pkg/sql/sqlbase/testutils.go index 1232da5d5dac..ad853becb4d5 100644 --- a/pkg/sql/sqlbase/testutils.go +++ b/pkg/sql/sqlbase/testutils.go @@ -25,7 +25,7 @@ import ( "unicode" "github.com/cockroachdb/apd" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" @@ -47,7 +47,7 @@ import ( // This file contains utility functions for tests (in other packages). // GetTableDescriptor retrieves a table descriptor directly from the KV layer. -func GetTableDescriptor(kvDB *client.DB, database string, table string) *TableDescriptor { +func GetTableDescriptor(kvDB *kv.DB, database string, table string) *TableDescriptor { // log.VEventf(context.TODO(), 2, "GetTableDescriptor %q %q", database, table) // testutil, so we pass settings as nil for both database and table name keys. dKey := NewDatabaseKey(database) @@ -89,7 +89,7 @@ func GetTableDescriptor(kvDB *client.DB, database string, table string) *TableDe // GetImmutableTableDescriptor retrieves an immutable table descriptor directly from the KV layer. 
func GetImmutableTableDescriptor( - kvDB *client.DB, database string, table string, + kvDB *kv.DB, database string, table string, ) *ImmutableTableDescriptor { return NewImmutableTableDescriptor(*GetTableDescriptor(kvDB, database, table)) } diff --git a/pkg/sql/sqlbase/utils_test.go b/pkg/sql/sqlbase/utils_test.go index b2125ee118b4..0fe071f45311 100644 --- a/pkg/sql/sqlbase/utils_test.go +++ b/pkg/sql/sqlbase/utils_test.go @@ -17,7 +17,7 @@ import ( "testing" "github.com/cockroachdb/apd" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/util/encoding" @@ -42,7 +42,7 @@ var tableNames = map[string]bool{ // - 'd' first byte - decimal (ascending) // - NULLASC, NULLDESC, NOTNULLASC, NOTNULLDESC // - PrefixEnd -func EncodeTestKey(tb testing.TB, kvDB *client.DB, keyStr string) roachpb.Key { +func EncodeTestKey(tb testing.TB, kvDB *kv.DB, keyStr string) roachpb.Key { var key []byte tokens := strings.Split(keyStr, "/") if tokens[0] != "" { diff --git a/pkg/sql/sqlutil/internal_executor.go b/pkg/sql/sqlutil/internal_executor.go index a14a7e79e470..3a9f06aed25a 100644 --- a/pkg/sql/sqlutil/internal_executor.go +++ b/pkg/sql/sqlutil/internal_executor.go @@ -13,7 +13,7 @@ package sqlutil import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -33,7 +33,7 @@ type InternalExecutor interface { // Exec is deprecated because it may transparently execute a query as root. Use // ExecEx instead. Exec( - ctx context.Context, opName string, txn *client.Txn, statement string, params ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, statement string, params ...interface{}, ) (int, error) // ExecEx is like Exec, but allows the caller to override some session data @@ -44,7 +44,7 @@ type InternalExecutor interface { ExecEx( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, o sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -59,7 +59,7 @@ type InternalExecutor interface { // Query is deprecated because it may transparently execute a query as root. Use // QueryEx instead. Query( - ctx context.Context, opName string, txn *client.Txn, statement string, qargs ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, statement string, qargs ...interface{}, ) ([]tree.Datums, error) // QueryEx is like Query, but allows the caller to override some session data @@ -70,7 +70,7 @@ type InternalExecutor interface { QueryEx( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, session sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, @@ -79,7 +79,7 @@ type InternalExecutor interface { // QueryWithCols is like QueryEx, but it also returns the computed ResultColumns // of the input query. QueryWithCols( - ctx context.Context, opName string, txn *client.Txn, + ctx context.Context, opName string, txn *kv.Txn, o sqlbase.InternalExecutorSessionDataOverride, statement string, qargs ...interface{}, ) ([]tree.Datums, sqlbase.ResultColumns, error) @@ -88,7 +88,7 @@ type InternalExecutor interface { // // QueryRow is deprecated (like Query). Use QueryRowEx() instead. 
QueryRow( - ctx context.Context, opName string, txn *client.Txn, statement string, qargs ...interface{}, + ctx context.Context, opName string, txn *kv.Txn, statement string, qargs ...interface{}, ) (tree.Datums, error) // QueryRowEx is like QueryRow, but allows the caller to override some session data @@ -99,7 +99,7 @@ type InternalExecutor interface { QueryRowEx( ctx context.Context, opName string, - txn *client.Txn, + txn *kv.Txn, session sqlbase.InternalExecutorSessionDataOverride, stmt string, qargs ...interface{}, diff --git a/pkg/sql/statement_diagnostics.go b/pkg/sql/statement_diagnostics.go index 3863efc0d4cf..007f8a6f682c 100644 --- a/pkg/sql/statement_diagnostics.go +++ b/pkg/sql/statement_diagnostics.go @@ -16,7 +16,7 @@ import ( "errors" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -60,13 +60,13 @@ type stmtDiagnosticsRequestRegistry struct { epoch int } ie *InternalExecutor - db *client.DB + db *kv.DB gossip *gossip.Gossip nodeID roachpb.NodeID } func newStmtDiagnosticsRequestRegistry( - ie *InternalExecutor, db *client.DB, g *gossip.Gossip, nodeID roachpb.NodeID, + ie *InternalExecutor, db *kv.DB, g *gossip.Gossip, nodeID roachpb.NodeID, ) *stmtDiagnosticsRequestRegistry { r := &stmtDiagnosticsRequestRegistry{ ie: ie, @@ -117,7 +117,7 @@ func (r *stmtDiagnosticsRequestRegistry) InsertRequest( ctx context.Context, fprint string, ) (int, error) { var requestID int - err := r.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Check if there's already a pending request for this fingerprint. 
row, err := r.ie.QueryRowEx(ctx, "stmt-diag-check-pending", txn, sqlbase.InternalExecutorSessionDataOverride{ @@ -260,7 +260,7 @@ func (r *stmtDiagnosticsRequestRegistry) shouldCollectDiagnostics( func (r *stmtDiagnosticsRequestRegistry) insertDiagnostics( ctx context.Context, req stmtDiagRequest, stmt string, trace tracing.Recording, ) error { - return r.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { { row, err := r.ie.QueryRowEx(ctx, "stmt-diag-check-completed", txn, sqlbase.InternalExecutorSessionDataOverride{User: security.RootUser}, diff --git a/pkg/sql/stats/automatic_stats_test.go b/pkg/sql/stats/automatic_stats_test.go index f67fb9832737..d5b9264fea2e 100644 --- a/pkg/sql/stats/automatic_stats_test.go +++ b/pkg/sql/stats/automatic_stats_test.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -172,7 +172,7 @@ func TestAverageRefreshTime(t *testing.T) { } insertStat := func( - txn *client.Txn, name string, columnIDs *tree.DArray, createdAt *tree.DTimestamp, + txn *kv.Txn, name string, columnIDs *tree.DArray, createdAt *tree.DTimestamp, ) error { _, err := executor.Exec( ctx, "insert-statistic", txn, @@ -198,7 +198,7 @@ func TestAverageRefreshTime(t *testing.T) { // Add some stats on column k in table a with a name different from // AutoStatsName, separated by three hours each, starting 7 hours ago. - if err := s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { for i := 0; i < 10; i++ { columnIDsVal := tree.NewDArray(types.Int) if err := columnIDsVal.Append(tree.NewDInt(tree.DInt(1))); err != nil { @@ -228,7 +228,7 @@ func TestAverageRefreshTime(t *testing.T) { // Add some stats on column v in table a with name AutoStatsName, separated // by three hours each, starting 6 hours ago. - if err := s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { for i := 0; i < 10; i++ { columnIDsVal := tree.NewDArray(types.Int) if err := columnIDsVal.Append(tree.NewDInt(tree.DInt(2))); err != nil { @@ -276,7 +276,7 @@ func TestAverageRefreshTime(t *testing.T) { // Add some stats on column k in table a with name AutoStatsName, separated // by 1.5 hours each, starting 5 hours ago. 
- if err := s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { for i := 0; i < 10; i++ { columnIDsVal := tree.NewDArray(types.Int) if err := columnIDsVal.Append(tree.NewDInt(tree.DInt(1))); err != nil { diff --git a/pkg/sql/stats/delete_stats.go b/pkg/sql/stats/delete_stats.go index a55f78d9efaa..fcad98dfeada 100644 --- a/pkg/sql/stats/delete_stats.go +++ b/pkg/sql/stats/delete_stats.go @@ -13,7 +13,7 @@ package stats import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" @@ -36,7 +36,7 @@ const ( func DeleteOldStatsForColumns( ctx context.Context, executor sqlutil.InternalExecutor, - txn *client.Txn, + txn *kv.Txn, tableID sqlbase.ID, columnIDs []sqlbase.ColumnID, ) error { diff --git a/pkg/sql/stats/delete_stats_test.go b/pkg/sql/stats/delete_stats_test.go index 1c3cd9990084..3649f23e2733 100644 --- a/pkg/sql/stats/delete_stats_test.go +++ b/pkg/sql/stats/delete_stats_test.go @@ -19,7 +19,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" @@ -251,7 +251,7 @@ func TestDeleteOldStatsForColumns(t *testing.T) { checkDelete := func( tableID sqlbase.ID, columnIDs []sqlbase.ColumnID, expectDeleted map[uint64]struct{}, ) error { - if err := s.DB().Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := s.DB().Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return DeleteOldStatsForColumns(ctx, ex, txn, tableID, columnIDs) }); err != nil { return err diff --git a/pkg/sql/stats/new_stat.go b/pkg/sql/stats/new_stat.go index 5759ce2c039e..35f895076bf6 100644 --- a/pkg/sql/stats/new_stat.go +++ b/pkg/sql/stats/new_stat.go @@ -14,7 +14,7 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" @@ -27,7 +27,7 @@ import ( func InsertNewStats( ctx context.Context, executor sqlutil.InternalExecutor, - txn *client.Txn, + txn *kv.Txn, tableStats []*TableStatisticProto, ) error { var err error @@ -57,7 +57,7 @@ func InsertNewStats( func InsertNewStat( ctx context.Context, executor sqlutil.InternalExecutor, - txn *client.Txn, + txn *kv.Txn, tableID sqlbase.ID, name string, columnIDs []sqlbase.ColumnID, diff --git a/pkg/sql/stats/stats_cache.go b/pkg/sql/stats/stats_cache.go index e5f1d0964762..ebe386f43c41 100644 --- a/pkg/sql/stats/stats_cache.go +++ b/pkg/sql/stats/stats_cache.go @@ -15,7 +15,7 @@ import ( "sync" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/opt/cat" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -56,7 +56,7 @@ type TableStatisticsCache struct { numInternalQueries int64 } Gossip *gossip.Gossip - 
ClientDB *client.DB + ClientDB *kv.DB SQLExecutor sqlutil.InternalExecutor } @@ -77,7 +77,7 @@ type cacheEntry struct { // NewTableStatisticsCache creates a new TableStatisticsCache that can hold // statistics for tables. func NewTableStatisticsCache( - cacheSize int, g *gossip.Gossip, db *client.DB, sqlExecutor sqlutil.InternalExecutor, + cacheSize int, g *gossip.Gossip, db *kv.DB, sqlExecutor sqlutil.InternalExecutor, ) *TableStatisticsCache { tableStatsCache := &TableStatisticsCache{ Gossip: g, diff --git a/pkg/sql/stats/stats_cache_test.go b/pkg/sql/stats/stats_cache_test.go index 343fd87f91f7..bcd943b883e6 100644 --- a/pkg/sql/stats/stats_cache_test.go +++ b/pkg/sql/stats/stats_cache_test.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" @@ -34,7 +34,7 @@ import ( ) func insertTableStat( - ctx context.Context, db *client.DB, ex sqlutil.InternalExecutor, stat *TableStatisticProto, + ctx context.Context, db *kv.DB, ex sqlutil.InternalExecutor, stat *TableStatisticProto, ) error { insertStatStmt := ` INSERT INTO system.table_statistics ("tableID", "statisticID", name, "columnIDs", "createdAt", @@ -134,7 +134,7 @@ func checkStats(actual []*TableStatistic, expected []*TableStatisticProto) bool } func initTestData( - ctx context.Context, db *client.DB, ex sqlutil.InternalExecutor, + ctx context.Context, db *kv.DB, ex sqlutil.InternalExecutor, ) (map[sqlbase.ID][]*TableStatisticProto, error) { // The expected stats must be ordered by TableID+, CreatedAt- so they can // later be compared with the returned stats using reflect.DeepEqual. diff --git a/pkg/sql/table.go b/pkg/sql/table.go index dae5c6785f40..40d3b9b6b0ce 100644 --- a/pkg/sql/table.go +++ b/pkg/sql/table.go @@ -17,10 +17,10 @@ import ( "strings" "sync" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/schema" @@ -183,7 +183,7 @@ func isSupportedSchemaName(n tree.Name) bool { // return a nil descriptor and no error if the table does not exist. // func (tc *TableCollection) getMutableTableDescriptor( - ctx context.Context, txn *client.Txn, tn *tree.TableName, flags tree.ObjectLookupFlags, + ctx context.Context, txn *kv.Txn, tn *tree.TableName, flags tree.ObjectLookupFlags, ) (*sqlbase.MutableTableDescriptor, error) { if log.V(2) { log.Infof(ctx, "reading mutable descriptor on table '%s'", tn) @@ -240,7 +240,7 @@ func (tc *TableCollection) getMutableTableDescriptor( // resolveSchemaID attempts to lookup the schema from the schemaCache if it exists, // otherwise falling back to a database lookup. func (tc *TableCollection) resolveSchemaID( - ctx context.Context, txn *client.Txn, dbID sqlbase.ID, schemaName string, + ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, schemaName string, ) (bool, sqlbase.ID, error) { // Fast path public schema, as it is always found. 
if schemaName == tree.PublicSchema { @@ -279,7 +279,7 @@ func (tc *TableCollection) resolveSchemaID( // the validity window of the table descriptor version returned. // func (tc *TableCollection) getTableVersion( - ctx context.Context, txn *client.Txn, tn *tree.TableName, flags tree.ObjectLookupFlags, + ctx context.Context, txn *kv.Txn, tn *tree.TableName, flags tree.ObjectLookupFlags, ) (*sqlbase.ImmutableTableDescriptor, error) { if log.V(2) { log.Infof(ctx, "planner acquiring lease on table '%s'", tn) @@ -401,7 +401,7 @@ func (tc *TableCollection) getTableVersion( // getTableVersionByID is a by-ID variant of getTableVersion (i.e. uses same cache). func (tc *TableCollection) getTableVersionByID( - ctx context.Context, txn *client.Txn, tableID sqlbase.ID, flags tree.ObjectLookupFlags, + ctx context.Context, txn *kv.Txn, tableID sqlbase.ID, flags tree.ObjectLookupFlags, ) (*sqlbase.ImmutableTableDescriptor, error) { log.VEventf(ctx, 2, "planner getting table on table ID %d", tableID) @@ -467,7 +467,7 @@ func (tc *TableCollection) getTableVersionByID( // getMutableTableVersionByID is a variant of sqlbase.GetTableDescFromID which returns a mutable // table descriptor of the table modified in the same transaction. func (tc *TableCollection) getMutableTableVersionByID( - ctx context.Context, tableID sqlbase.ID, txn *client.Txn, + ctx context.Context, tableID sqlbase.ID, txn *kv.Txn, ) (*sqlbase.MutableTableDescriptor, error) { log.VEventf(ctx, 2, "planner getting mutable table on table ID %d", tableID) @@ -715,7 +715,7 @@ func (tc *TableCollection) getUncommittedTableByID(id sqlbase.ID) uncommittedTab // first checking the TableCollection's cached descriptors for validity // before defaulting to a key-value scan, if necessary. func (tc *TableCollection) getAllDescriptors( - ctx context.Context, txn *client.Txn, + ctx context.Context, txn *kv.Txn, ) ([]sqlbase.DescriptorProto, error) { if tc.allDescriptors == nil { descs, err := GetAllDescriptors(ctx, txn) @@ -732,7 +732,7 @@ func (tc *TableCollection) getAllDescriptors( // validity before scanning system.namespace and looking up the descriptors // in the database cache, if necessary. func (tc *TableCollection) getAllDatabaseDescriptors( - ctx context.Context, txn *client.Txn, + ctx context.Context, txn *kv.Txn, ) ([]*sqlbase.DatabaseDescriptor, error) { if tc.allDatabaseDescriptors == nil { dbDescIDs, err := GetAllDatabaseDescriptorIDs(ctx, txn) @@ -756,7 +756,7 @@ func (tc *TableCollection) getAllDatabaseDescriptors( // visible by the transaction. This uses the schema cache locally // if possible, or else performs a scan on kv. func (tc *TableCollection) getSchemasForDatabase( - ctx context.Context, txn *client.Txn, dbID sqlbase.ID, + ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, ) (map[sqlbase.ID]string, error) { if tc.allSchemasForDatabase == nil { tc.allSchemasForDatabase = make(map[sqlbase.ID]map[sqlbase.ID]string) @@ -992,7 +992,7 @@ func (p *planner) writeSchemaChangeToBatch( ctx context.Context, tableDesc *sqlbase.MutableTableDescriptor, mutationID sqlbase.MutationID, - b *client.Batch, + b *kv.Batch, ) error { if tableDesc.Dropped() { // We don't allow schema changes on a dropped table. 
@@ -1021,7 +1021,7 @@ func (p *planner) writeTableDescToBatch( ctx context.Context, tableDesc *sqlbase.MutableTableDescriptor, mutationID sqlbase.MutationID, - b *client.Batch, + b *kv.Batch, ) error { if tableDesc.IsVirtualTable() { return errors.AssertionFailedf("virtual descriptors cannot be stored, found: %v", tableDesc) diff --git a/pkg/sql/tablewriter.go b/pkg/sql/tablewriter.go index ca35c0c140c1..c95701262608 100644 --- a/pkg/sql/tablewriter.go +++ b/pkg/sql/tablewriter.go @@ -13,7 +13,7 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -45,7 +45,7 @@ type tableWriter interface { // init provides the tableWriter with a Txn and optional monitor to write to // and returns an error if it was misconfigured. - init(context.Context, *client.Txn, *tree.EvalContext) error + init(context.Context, *kv.Txn, *tree.EvalContext) error // row performs a sql row modification (tableInserter performs an insert, // etc). It batches up writes to the init'd txn and periodically sends them. @@ -107,16 +107,16 @@ const ( // the other tableWriters. type tableWriterBase struct { // txn is the current KV transaction. - txn *client.Txn + txn *kv.Txn // is autoCommit turned on. autoCommit autoCommitOpt // b is the current batch. - b *client.Batch + b *kv.Batch // batchSize is the current batch size (when known). batchSize int } -func (tb *tableWriterBase) init(txn *client.Txn) { +func (tb *tableWriterBase) init(txn *kv.Txn) { tb.txn = txn tb.b = txn.NewBatch() } diff --git a/pkg/sql/tablewriter_delete.go b/pkg/sql/tablewriter_delete.go index e838a295ceca..250c5dbb6a6b 100644 --- a/pkg/sql/tablewriter_delete.go +++ b/pkg/sql/tablewriter_delete.go @@ -14,7 +14,7 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" @@ -43,7 +43,7 @@ func (*tableDeleter) desc() string { return "deleter" } func (td *tableDeleter) walkExprs(_ func(desc string, index int, expr tree.TypedExpr)) {} // init is part of the tableWriter interface. -func (td *tableDeleter) init(_ context.Context, txn *client.Txn, _ *tree.EvalContext) error { +func (td *tableDeleter) init(_ context.Context, txn *kv.Txn, _ *tree.EvalContext) error { td.tableWriterBase.init(txn) return nil } @@ -253,7 +253,7 @@ func (td *tableDeleter) clearIndex(ctx context.Context, idx *sqlbase.IndexDescri // ClearRange cannot be run in a transaction, so create a // non-transactional batch to send the request. 
- b := &client.Batch{} + b := &kv.Batch{} b.AddRawRequest(&roachpb.ClearRangeRequest{ RequestHeader: roachpb.RequestHeader{ Key: sp.Key, diff --git a/pkg/sql/tablewriter_insert.go b/pkg/sql/tablewriter_insert.go index 151d3f347576..915a8d3f4513 100644 --- a/pkg/sql/tablewriter_insert.go +++ b/pkg/sql/tablewriter_insert.go @@ -13,7 +13,7 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -32,7 +32,7 @@ var _ tableWriter = &tableInserter{} func (*tableInserter) desc() string { return "inserter" } // init is part of the tableWriter interface. -func (ti *tableInserter) init(_ context.Context, txn *client.Txn, _ *tree.EvalContext) error { +func (ti *tableInserter) init(_ context.Context, txn *kv.Txn, _ *tree.EvalContext) error { ti.tableWriterBase.init(txn) return nil } diff --git a/pkg/sql/tablewriter_update.go b/pkg/sql/tablewriter_update.go index a97c00a0d550..cf537d070c1a 100644 --- a/pkg/sql/tablewriter_update.go +++ b/pkg/sql/tablewriter_update.go @@ -13,7 +13,7 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -32,7 +32,7 @@ var _ tableWriter = &tableUpdater{} func (*tableUpdater) desc() string { return "updater" } // init is part of the tableWriter interface. -func (tu *tableUpdater) init(_ context.Context, txn *client.Txn, _ *tree.EvalContext) error { +func (tu *tableUpdater) init(_ context.Context, txn *kv.Txn, _ *tree.EvalContext) error { tu.tableWriterBase.init(txn) return nil } diff --git a/pkg/sql/tablewriter_upsert_opt.go b/pkg/sql/tablewriter_upsert_opt.go index 6be0020f6e9e..a7ce5f786bc3 100644 --- a/pkg/sql/tablewriter_upsert_opt.go +++ b/pkg/sql/tablewriter_upsert_opt.go @@ -13,7 +13,7 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql/row" "github.com/cockroachdb/cockroach/pkg/sql/rowcontainer" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -116,7 +116,7 @@ var _ tableWriter = &optTableUpserter{} // init is part of the tableWriter interface. func (tu *optTableUpserter) init( - ctx context.Context, txn *client.Txn, evalCtx *tree.EvalContext, + ctx context.Context, txn *kv.Txn, evalCtx *tree.EvalContext, ) error { tu.tableWriterBase.init(txn) tableDesc := tu.tableDesc() @@ -295,7 +295,7 @@ func (tu *optTableUpserter) atBatchEnd(ctx context.Context, traceKV bool) error // there was no conflict. If the RETURNING clause was specified, then the // inserted row is stored in the rowsUpserted collection. func (tu *optTableUpserter) insertNonConflictingRow( - ctx context.Context, b *client.Batch, insertRow tree.Datums, overwrite, traceKV bool, + ctx context.Context, b *kv.Batch, insertRow tree.Datums, overwrite, traceKV bool, ) error { // Perform the insert proper. if err := tu.ri.InsertRow( @@ -341,7 +341,7 @@ func (tu *optTableUpserter) insertNonConflictingRow( // rowsUpserted collection. 
func (tu *optTableUpserter) updateConflictingRow( ctx context.Context, - b *client.Batch, + b *kv.Batch, fetchRow tree.Datums, updateValues tree.Datums, tableDesc *sqlbase.ImmutableTableDescriptor, diff --git a/pkg/sql/temporary_schema.go b/pkg/sql/temporary_schema.go index 8ca3afd17324..1d1843aaef1e 100644 --- a/pkg/sql/temporary_schema.go +++ b/pkg/sql/temporary_schema.go @@ -15,7 +15,7 @@ import ( "fmt" "strings" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -47,7 +47,7 @@ func (p *planner) createSchemaWithID( log.VEventf(ctx, 2, "CPut %s -> %d", schemaNameKey, schemaID) } - b := &client.Batch{} + b := &kv.Batch{} b.CPut(schemaNameKey, schemaID, nil) return p.txn.Run(ctx, b) @@ -63,7 +63,7 @@ func temporarySchemaName(sessionID ClusterWideID) string { // getTemporaryObjectNames returns all the temporary objects under the // temporary schema of the given dbID. func getTemporaryObjectNames( - ctx context.Context, txn *client.Txn, dbID sqlbase.ID, tempSchemaName string, + ctx context.Context, txn *kv.Txn, dbID sqlbase.ID, tempSchemaName string, ) (TableNames, error) { dbDesc, err := MustGetDatabaseDescByID(ctx, txn, dbID) if err != nil { @@ -90,7 +90,7 @@ func cleanupSessionTempObjects(ctx context.Context, server *Server, sessionID Cl } ie := MakeInternalExecutor(ctx, server, MemoryMetrics{}, server.cfg.Settings) ie.SetSessionData(sd) - return server.cfg.DB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return server.cfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // We are going to read all database descriptor IDs, then for each database // we will drop all the objects under the temporary schema. 
dbIDs, err := GetAllDatabaseDescriptorIDs(ctx, txn) @@ -117,8 +117,8 @@ func cleanupSessionTempObjects(ctx context.Context, server *Server, sessionID Cl func TestingCleanupSchemaObjects( ctx context.Context, settings *cluster.Settings, - execQuery func(context.Context, string, *client.Txn, string, ...interface{}) (int, error), - txn *client.Txn, + execQuery func(context.Context, string, *kv.Txn, string, ...interface{}) (int, error), + txn *kv.Txn, dbID sqlbase.ID, schemaName string, ) error { @@ -129,8 +129,8 @@ func TestingCleanupSchemaObjects( func cleanupSchemaObjects( ctx context.Context, settings *cluster.Settings, - execQuery func(context.Context, string, *client.Txn, string, ...interface{}) (int, error), - txn *client.Txn, + execQuery func(context.Context, string, *kv.Txn, string, ...interface{}) (int, error), + txn *kv.Txn, dbID sqlbase.ID, schemaName string, ) error { diff --git a/pkg/sql/temporary_schema_test.go b/pkg/sql/temporary_schema_test.go index 555cb68db030..e6d791ad9d8f 100644 --- a/pkg/sql/temporary_schema_test.go +++ b/pkg/sql/temporary_schema_test.go @@ -17,7 +17,7 @@ import ( "strings" "testing" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -89,12 +89,12 @@ INSERT INTO perm_table VALUES (DEFAULT, 1); require.NoError( t, - kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { err = sql.TestingCleanupSchemaObjects( ctx, s.ExecutorConfig().(sql.ExecutorConfig).Settings, func( - ctx context.Context, _ string, _ *client.Txn, query string, _ ...interface{}, + ctx context.Context, _ string, _ *kv.Txn, query string, _ ...interface{}, ) (int, error) { _, err := conn.ExecContext(ctx, query) return 0, err diff --git a/pkg/sql/tests/data.go b/pkg/sql/tests/data.go index 5524008f7362..631530a5621f 100644 --- a/pkg/sql/tests/data.go +++ b/pkg/sql/tests/data.go @@ -17,13 +17,13 @@ import ( "fmt" "testing" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" ) // CheckKeyCount checks that the number of keys in the provided span matches // numKeys. -func CheckKeyCount(t *testing.T, kvDB *client.DB, span roachpb.Span, numKeys int) { +func CheckKeyCount(t *testing.T, kvDB *kv.DB, span roachpb.Span, numKeys int) { t.Helper() if kvs, err := kvDB.Scan(context.TODO(), span.Key, span.EndKey, 0); err != nil { t.Fatal(err) diff --git a/pkg/sql/tests/kv_test.go b/pkg/sql/tests/kv_test.go index 321e5d054f38..4931dabbea0d 100644 --- a/pkg/sql/tests/kv_test.go +++ b/pkg/sql/tests/kv_test.go @@ -22,7 +22,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + kv2 "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/pkg/errors" ) @@ -39,7 +39,7 @@ type kvInterface interface { // kvNative uses the native client package to implement kvInterface. 
type kvNative struct { - db *client.DB + db *kv2.DB epoch int prefix string doneFn func() @@ -62,7 +62,7 @@ func newKVNative(b *testing.B) kvInterface { func (kv *kvNative) Insert(rows, run int) error { firstRow := rows * run lastRow := rows * (run + 1) - err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *kv2.Txn) error { b := txn.NewBatch() for i := firstRow; i < lastRow; i++ { b.Put(fmt.Sprintf("%s%08d", kv.prefix, i), i) @@ -74,7 +74,7 @@ func (kv *kvNative) Insert(rows, run int) error { func (kv *kvNative) Update(rows, run int) error { perm := rand.Perm(rows) - err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *kv2.Txn) error { // Read all values in a batch. b := txn.NewBatch() for i := 0; i < rows; i++ { @@ -97,7 +97,7 @@ func (kv *kvNative) Update(rows, run int) error { func (kv *kvNative) Delete(rows, run int) error { firstRow := rows * run lastRow := rows * (run + 1) - err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *kv2.Txn) error { b := txn.NewBatch() for i := firstRow; i < lastRow; i++ { b.Del(fmt.Sprintf("%s%08d", kv.prefix, i)) @@ -108,8 +108,8 @@ func (kv *kvNative) Delete(rows, run int) error { } func (kv *kvNative) Scan(rows, run int) error { - var kvs []client.KeyValue - err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + var kvs []kv2.KeyValue + err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *kv2.Txn) error { var err error kvs, err = txn.Scan(ctx, fmt.Sprintf("%s%08d", kv.prefix, 0), fmt.Sprintf("%s%08d", kv.prefix, rows), int64(rows)) return err @@ -126,7 +126,7 @@ func (kv *kvNative) prep(rows int, initData bool) error { if !initData { return nil } - err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *kv2.Txn) error { b := txn.NewBatch() for i := 0; i < rows; i++ { b.Put(fmt.Sprintf("%s%08d", kv.prefix, i), i) diff --git a/pkg/sql/tests/split_test.go b/pkg/sql/tests/split_test.go index 75920fb8c3e9..38bd5650f231 100644 --- a/pkg/sql/tests/split_test.go +++ b/pkg/sql/tests/split_test.go @@ -16,8 +16,8 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql/tests" @@ -28,7 +28,7 @@ import ( ) // getRangeKeys returns the end keys of all ranges. 
-func getRangeKeys(db *client.DB) ([]roachpb.Key, error) { +func getRangeKeys(db *kv.DB) ([]roachpb.Key, error) { rows, err := db.Scan(context.TODO(), keys.Meta2Prefix, keys.MetaMax, 0) if err != nil { return nil, err @@ -40,7 +40,7 @@ func getRangeKeys(db *client.DB) ([]roachpb.Key, error) { return ret, nil } -func getNumRanges(db *client.DB) (int, error) { +func getNumRanges(db *kv.DB) (int, error) { rows, err := getRangeKeys(db) if err != nil { return 0, err diff --git a/pkg/sql/truncate.go b/pkg/sql/truncate.go index 5d979699d2cc..c9dbd6231d1e 100644 --- a/pkg/sql/truncate.go +++ b/pkg/sql/truncate.go @@ -14,9 +14,9 @@ import ( "context" "github.com/cockroachdb/cockroach/pkg/config" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/sql/privilege" @@ -216,7 +216,7 @@ func (p *planner) truncateTable( nameKey := sqlbase.MakePublicTableNameKey(ctx, p.ExecCfg().Settings, tableDesc.ParentID, tableDesc.GetName()).Key() key := sqlbase.MakePublicTableNameKey(ctx, p.ExecCfg().Settings, newTableDesc.ParentID, newTableDesc.Name).Key() - b := &client.Batch{} + b := &kv.Batch{} // Use CPut because we want to remove a specific name -> id map. if traceKV { log.VEventf(ctx, 2, "CPut %s -> nil", nameKey) @@ -288,7 +288,7 @@ func (p *planner) truncateTable( } // Copy the zone config. - b = &client.Batch{} + b = &kv.Batch{} b.Get(zoneKey) if err := p.txn.Run(ctx, b); err != nil { return err @@ -543,7 +543,7 @@ func reassignIndexComment( // can even eliminate the need to use a transaction for each chunk at a later // stage if it proves inefficient). 
func truncateTableInChunks( - ctx context.Context, tableDesc *sqlbase.TableDescriptor, db *client.DB, traceKV bool, + ctx context.Context, tableDesc *sqlbase.TableDescriptor, db *kv.DB, traceKV bool, ) error { const chunkSize = TableTruncateChunkSize var resume roachpb.Span @@ -553,7 +553,7 @@ func truncateTableInChunks( if traceKV { log.VEventf(ctx, 2, "table %s truncate at row: %d, span: %s", tableDesc.Name, rowIdx, resume) } - if err := db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { rd, err := row.MakeDeleter( ctx, txn, diff --git a/pkg/sql/txn_restart_test.go b/pkg/sql/txn_restart_test.go index 5d5b7990f8c8..a4c2f9bc63b5 100644 --- a/pkg/sql/txn_restart_test.go +++ b/pkg/sql/txn_restart_test.go @@ -24,7 +24,7 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/storagebase" @@ -1097,7 +1097,7 @@ func TestNonRetryableError(t *testing.T) { cleanupFilter := cmdFilters.AppendFilter( func(args storagebase.FilterArgs) *roachpb.Error { if req, ok := args.Req.(*roachpb.ScanRequest); ok { - if bytes.Contains(req.Key, testKey) && !client.TestingIsRangeLookupRequest(req) { + if bytes.Contains(req.Key, testKey) && !kv.TestingIsRangeLookupRequest(req) { hitError = true return roachpb.NewErrorWithTxn(fmt.Errorf("testError"), args.Hdr.Txn) } @@ -1151,7 +1151,7 @@ func TestReacquireLeaseOnRestart(t *testing.T) { // Hack to advance the transaction timestamp on a transaction restart. for _, union := range ba.Requests { if req, ok := union.GetInner().(*roachpb.ScanRequest); ok { - if bytes.Contains(req.Key, testKey) && !client.TestingIsRangeLookupRequest(req) { + if bytes.Contains(req.Key, testKey) && !kv.TestingIsRangeLookupRequest(req) { atomic.AddInt32(&clockUpdate, 1) now := c.Now() now.WallTime += advancement.Nanoseconds() @@ -1184,7 +1184,7 @@ func TestReacquireLeaseOnRestart(t *testing.T) { } if req, ok := args.Req.(*roachpb.ScanRequest); ok { - if bytes.Contains(req.Key, testKey) && !client.TestingIsRangeLookupRequest(req) { + if bytes.Contains(req.Key, testKey) && !kv.TestingIsRangeLookupRequest(req) { atomic.AddInt32(&restartDone, 1) // Return ReadWithinUncertaintyIntervalError to update the transaction timestamp on retry. txn := args.Hdr.Txn @@ -1254,7 +1254,7 @@ func TestFlushUncommitedDescriptorCacheOnRestart(t *testing.T) { } if req, ok := args.Req.(*roachpb.ScanRequest); ok { - if bytes.Contains(req.Key, testKey) && !client.TestingIsRangeLookupRequest(req) { + if bytes.Contains(req.Key, testKey) && !kv.TestingIsRangeLookupRequest(req) { atomic.AddInt32(&restartDone, 1) // Return ReadWithinUncertaintyIntervalError. txn := args.Hdr.Txn diff --git a/pkg/sql/txn_state.go b/pkg/sql/txn_state.go index 4c4f45d94178..58a6d7795097 100644 --- a/pkg/sql/txn_state.go +++ b/pkg/sql/txn_state.go @@ -14,7 +14,7 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -49,7 +49,7 @@ type txnState struct { mu struct { syncutil.RWMutex - txn *client.Txn + txn *kv.Txn } // connCtx is the connection's context. 
This is the parent of Ctx. @@ -141,7 +141,7 @@ func (ts *txnState) resetForNewSQLTxn( historicalTimestamp *hlc.Timestamp, priority roachpb.UserPriority, readOnly tree.ReadWriteMode, - txn *client.Txn, + txn *kv.Txn, tranCtx transitionCtx, ) { // Reset state vars to defaults. @@ -199,7 +199,7 @@ func (ts *txnState) resetForNewSQLTxn( ts.mon.Start(ts.Ctx, tranCtx.connMon, mon.BoundAccount{} /* reserved */) ts.mu.Lock() if txn == nil { - ts.mu.txn = client.NewTxnWithSteppingEnabled(ts.Ctx, tranCtx.db, tranCtx.nodeID) + ts.mu.txn = kv.NewTxnWithSteppingEnabled(ts.Ctx, tranCtx.db, tranCtx.nodeID) ts.mu.txn.SetDebugName(opName) } else { ts.mu.txn = txn @@ -400,7 +400,7 @@ type advanceInfo struct { // transitionCtx is a bag of fields needed by some state machine events. type transitionCtx struct { - db *client.DB + db *kv.DB nodeID roachpb.NodeID clock *hlc.Clock // connMon is the connExecutor's monitor. New transactions will create a child diff --git a/pkg/sql/txn_state_test.go b/pkg/sql/txn_state_test.go index 9b3fba53c190..080addfba134 100644 --- a/pkg/sql/txn_state_test.go +++ b/pkg/sql/txn_state_test.go @@ -17,7 +17,7 @@ import ( "testing" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -39,7 +39,7 @@ var noRewindExpected = CmdPos(-1) type testContext struct { manualClock *hlc.ManualClock clock *hlc.Clock - mockDB *client.DB + mockDB *kv.DB mon mon.BytesMonitor tracer opentracing.Tracer // ctx is mimicking the spirit of a client connection's context @@ -50,7 +50,7 @@ type testContext struct { func makeTestContext() testContext { manual := hlc.NewManualClock(123) clock := hlc.NewClock(manual.UnixNano, time.Nanosecond) - factory := client.MakeMockTxnSenderFactory( + factory := kv.MakeMockTxnSenderFactory( func(context.Context, *roachpb.Transaction, roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return nil, nil @@ -61,7 +61,7 @@ func makeTestContext() testContext { return testContext{ manualClock: manual, clock: clock, - mockDB: client.NewDB(ambient, factory, clock), + mockDB: kv.NewDB(ambient, factory, clock), mon: mon.MakeMonitor( "test root mon", mon.MemoryResource, @@ -103,7 +103,7 @@ func (tc *testContext) createOpenState(typ txnType) (fsm.State, *txnState) { mon: &txnStateMon, txnAbortCount: metric.NewCounter(MetaTxnAbort), } - ts.mu.txn = client.NewTxn(ctx, tc.mockDB, roachpb.NodeID(1) /* gatewayNodeID */) + ts.mu.txn = kv.NewTxn(ctx, tc.mockDB, roachpb.NodeID(1) /* gatewayNodeID */) state := stateOpen{ ImplicitTxn: fsm.FromBool(typ == implicitTxn), @@ -174,7 +174,7 @@ type expKVTxn struct { maxTSNanos *int64 } -func checkTxn(txn *client.Txn, exp expKVTxn) error { +func checkTxn(txn *kv.Txn, exp expKVTxn) error { if txn == nil { return errors.Errorf("expected a KV txn but found an uninitialized txn") } diff --git a/pkg/sql/zone_config.go b/pkg/sql/zone_config.go index 1a71bb597c9c..b3e214a54f4f 100644 --- a/pkg/sql/zone_config.go +++ b/pkg/sql/zone_config.go @@ -16,8 +16,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" 
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" @@ -171,7 +171,7 @@ func ZoneConfigHook( // object ID, index, and partition. func GetZoneConfigInTxn( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, id uint32, index *sqlbase.IndexDescriptor, partition string, @@ -259,7 +259,7 @@ func (p *planner) resolveTableForZone( // specifier points to a table, index or partition, the table part // must be properly normalized already. It is the caller's // responsibility to do this using e.g .resolveTableForZone(). -func resolveZone(ctx context.Context, txn *client.Txn, zs *tree.ZoneSpecifier) (sqlbase.ID, error) { +func resolveZone(ctx context.Context, txn *kv.Txn, zs *tree.ZoneSpecifier) (sqlbase.ID, error) { errMissingKey := errors.New("missing key") id, err := zonepb.ResolveZoneSpecifier(zs, func(parentID uint32, name string) (uint32, error) { @@ -314,7 +314,7 @@ func resolveSubzone( func deleteRemovedPartitionZoneConfigs( ctx context.Context, - txn *client.Txn, + txn *kv.Txn, tableDesc *sqlbase.TableDescriptor, idxDesc *sqlbase.IndexDescriptor, oldPartDesc *sqlbase.PartitioningDescriptor, diff --git a/pkg/sql/zone_config_test.go b/pkg/sql/zone_config_test.go index a8e3ec5e238f..d2163d787551 100644 --- a/pkg/sql/zone_config_test.go +++ b/pkg/sql/zone_config_test.go @@ -16,8 +16,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -50,7 +50,7 @@ func forceNewConfig(t testing.TB, s *server.TestServer) *config.SystemConfig { } // This needs to be done in a transaction with the system trigger set. - if err := s.DB().Txn(context.TODO(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB().Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -131,7 +131,7 @@ func TestGetZoneConfig(t *testing.T) { } // Verify sql.GetZoneConfigInTxn. - if err := s.DB().Txn(context.Background(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB().Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { _, zoneCfg, subzone, err := sql.GetZoneConfigInTxn(ctx, txn, tc.objectID, &sqlbase.IndexDescriptor{}, tc.partitionName, false) if err != nil { @@ -367,7 +367,7 @@ func TestCascadingZoneConfig(t *testing.T) { } // Verify sql.GetZoneConfigInTxn. 
- if err := s.DB().Txn(context.Background(), func(ctx context.Context, txn *client.Txn) error { + if err := s.DB().Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { _, zoneCfg, subzone, err := sql.GetZoneConfigInTxn(ctx, txn, tc.objectID, &sqlbase.IndexDescriptor{}, tc.partitionName, false) if err != nil { diff --git a/pkg/sqlmigrations/leasemanager/lease.go b/pkg/sqlmigrations/leasemanager/lease.go index 45f0ef67197a..b6428a375a6f 100644 --- a/pkg/sqlmigrations/leasemanager/lease.go +++ b/pkg/sqlmigrations/leasemanager/lease.go @@ -17,7 +17,7 @@ import ( "fmt" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/uuid" @@ -43,7 +43,7 @@ func (e *LeaseNotAvailableError) Error() string { // LeaseManager provides functionality for acquiring and managing leases // via the kv api. type LeaseManager struct { - db *client.DB + db *kv.DB clock *hlc.Clock clientID string leaseDuration time.Duration @@ -67,7 +67,7 @@ type Options struct { } // New allocates a new LeaseManager. -func New(db *client.DB, clock *hlc.Clock, options Options) *LeaseManager { +func New(db *kv.DB, clock *hlc.Clock, options Options) *LeaseManager { if options.ClientID == "" { options.ClientID = uuid.MakeV4().String() } @@ -94,7 +94,7 @@ func (m *LeaseManager) AcquireLease(ctx context.Context, key roachpb.Key) (*Leas key: key, } lease.val.sem = make(chan struct{}, 1) - if err := m.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := m.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { var val LeaseVal err := txn.GetProto(ctx, key, &val) if err != nil { diff --git a/pkg/sqlmigrations/migrations.go b/pkg/sqlmigrations/migrations.go index d688cdce626c..fcb899eca863 100644 --- a/pkg/sqlmigrations/migrations.go +++ b/pkg/sqlmigrations/migrations.go @@ -19,8 +19,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config/zonepb" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -426,10 +426,10 @@ type leaseManager interface { // db is defined just to allow us to use a fake client.DB when testing this // package. type db interface { - Scan(ctx context.Context, begin, end interface{}, maxRows int64) ([]client.KeyValue, error) - Get(ctx context.Context, key interface{}) (client.KeyValue, error) + Scan(ctx context.Context, begin, end interface{}, maxRows int64) ([]kv.KeyValue, error) + Get(ctx context.Context, key interface{}) (kv.KeyValue, error) Put(ctx context.Context, key, value interface{}) error - Txn(ctx context.Context, retryable func(ctx context.Context, txn *client.Txn) error) error + Txn(ctx context.Context, retryable func(ctx context.Context, txn *kv.Txn) error) error } // Manager encapsulates the necessary functionality for handling migrations @@ -446,7 +446,7 @@ type Manager struct { // NewManager initializes and returns a new Manager object. 
func NewManager( stopper *stop.Stopper, - db *client.DB, + db *kv.DB, executor *sql.InternalExecutor, clock *hlc.Clock, testingKnobs MigrationManagerTestingKnobs, @@ -667,7 +667,7 @@ func migrationKey(migration migrationDescriptor) roachpb.Key { func createSystemTable(ctx context.Context, r runner, desc sqlbase.TableDescriptor) error { // We install the table at the KV layer so that we can choose a known ID in // the reserved ID space. (The SQL layer doesn't allow this.) - err := r.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() tKey := sqlbase.MakePublicTableNameKey(ctx, r.settings, desc.GetParentID(), desc.GetName()) b.CPut(tKey.Key(), desc.GetID(), nil) @@ -726,7 +726,7 @@ func createProtectedTimestampsRecordsTable(ctx context.Context, r runner) error func createNewSystemNamespaceDescriptor(ctx context.Context, r runner) error { - return r.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + return r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() // Retrieve the existing namespace table's descriptor and change its name to @@ -893,7 +893,7 @@ func initializeClusterSecret(ctx context.Context, r runner) error { func populateVersionSetting(ctx context.Context, r runner) error { var v roachpb.Version - if err := r.db.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + if err := r.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { return txn.GetProto(ctx, keys.BootstrapVersionKey, &v) }); err != nil { return err diff --git a/pkg/sqlmigrations/migrations_test.go b/pkg/sqlmigrations/migrations_test.go index c93d19190df2..8bfe0bd8fdef 100644 --- a/pkg/sqlmigrations/migrations_test.go +++ b/pkg/sqlmigrations/migrations_test.go @@ -20,8 +20,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql" "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" @@ -92,7 +92,7 @@ type fakeDB struct { func (f *fakeDB) Scan( ctx context.Context, begin, end interface{}, maxRows int64, -) ([]client.KeyValue, error) { +) ([]kv.KeyValue, error) { if f.scanErr != nil { return nil, f.scanErr } @@ -102,9 +102,9 @@ func (f *fakeDB) Scan( if !bytes.Equal(end.(roachpb.Key), keys.MigrationKeyMax) { return nil, errors.Errorf("expected end key %q, got %q", keys.MigrationKeyMax, end) } - var results []client.KeyValue + var results []kv.KeyValue for k, v := range f.kvs { - results = append(results, client.KeyValue{ + results = append(results, kv.KeyValue{ Key: []byte(k), Value: &roachpb.Value{RawBytes: v}, }) @@ -112,8 +112,8 @@ func (f *fakeDB) Scan( return results, nil } -func (f *fakeDB) Get(ctx context.Context, key interface{}) (client.KeyValue, error) { - return client.KeyValue{}, errors.New("unimplemented") +func (f *fakeDB) Get(ctx context.Context, key interface{}) (kv.KeyValue, error) { + return kv.KeyValue{}, errors.New("unimplemented") } func (f *fakeDB) Put(ctx context.Context, key, value interface{}) error { @@ -126,7 +126,7 @@ func (f *fakeDB) Put(ctx context.Context, key, value interface{}) error { return nil } -func (f *fakeDB) Txn(context.Context, func(context.Context, *client.Txn) error) error { +func (f *fakeDB) Txn(context.Context, func(context.Context, *kv.Txn) error) 
error { return errors.New("unimplemented") } @@ -402,7 +402,7 @@ type migrationTest struct { oldMigrations []migrationDescriptor server serverutils.TestServerInterface sqlDB *sqlutils.SQLRunner - kvDB *client.DB + kvDB *kv.DB memMetrics *sql.MemoryMetrics } @@ -725,7 +725,7 @@ func TestMigrateNamespaceTableDescriptors(t *testing.T) { deprecatedKey := sqlbase.MakeDescMetadataKey(keys.DeprecatedNamespaceTableID) desc := &sqlbase.Descriptor{} - require.NoError(t, mt.kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.NoError(t, mt.kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { ts, err := txn.GetProtoTs(ctx, deprecatedKey, desc) require.NoError(t, err) desc.Table(ts).Name = sqlbase.NamespaceTable.Name @@ -735,7 +735,7 @@ func TestMigrateNamespaceTableDescriptors(t *testing.T) { // Run the migration. require.NoError(t, mt.runMigration(ctx, migration)) - require.NoError(t, mt.kvDB.Txn(ctx, func(ctx context.Context, txn *client.Txn) error { + require.NoError(t, mt.kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { // Check that the persisted descriptors now match our in-memory versions, // ignoring create and modification times. { diff --git a/pkg/testutils/localtestcluster/local_test_cluster.go b/pkg/testutils/localtestcluster/local_test_cluster.go index 13da93a297fb..efe5d4353758 100644 --- a/pkg/testutils/localtestcluster/local_test_cluster.go +++ b/pkg/testutils/localtestcluster/local_test_cluster.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/tscache" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -56,8 +56,8 @@ type LocalTestCluster struct { Eng storage.Engine Store *kvserver.Store StoreTestingKnobs *kvserver.StoreTestingKnobs - DBContext *client.DBContext - DB *client.DB + DBContext *kv.DBContext + DB *kv.DB Stores *kvserver.Stores Stopper *stop.Stopper Latency time.Duration // sleep for each RPC sent @@ -87,10 +87,10 @@ type InitFactoryFn func( tracer opentracing.Tracer, clock *hlc.Clock, latency time.Duration, - stores client.Sender, + stores kv.Sender, stopper *stop.Stopper, gossip *gossip.Gossip, -) client.TxnSenderFactory +) kv.TxnSenderFactory // Start starts the test cluster by bootstrapping an in-memory store // (defaults to maximum of 50M). The server is started, launching the @@ -126,12 +126,12 @@ func (ltc *LocalTestCluster) Start(t testing.TB, baseCtx *base.Config, initFacto factory := initFactory(cfg.Settings, nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper, ltc.Gossip) if ltc.DBContext == nil { - dbCtx := client.DefaultDBContext() + dbCtx := kv.DefaultDBContext() dbCtx.Stopper = ltc.Stopper ltc.DBContext = &dbCtx } ltc.DBContext.NodeID.Set(context.Background(), nodeID) - ltc.DB = client.NewDBWithContext(cfg.AmbientCtx, factory, ltc.Clock, *ltc.DBContext) + ltc.DB = kv.NewDBWithContext(cfg.AmbientCtx, factory, ltc.Clock, *ltc.DBContext) transport := kvserver.NewDummyRaftTransport(cfg.Settings) // By default, disable the replica scanner and split queue, which // confuse tests using LocalTestCluster. 
diff --git a/pkg/testutils/serverutils/test_server_shim.go b/pkg/testutils/serverutils/test_server_shim.go index be71e9cabe84..3704b1d920cd 100644 --- a/pkg/testutils/serverutils/test_server_shim.go +++ b/pkg/testutils/serverutils/test_server_shim.go @@ -25,7 +25,7 @@ import ( "testing" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/security" @@ -65,7 +65,7 @@ type TestServerInterface interface { SQLAddr() string // DB returns a *client.DB instance for talking to this KV server. - DB() *client.DB + DB() *kv.DB // RPCContext returns the rpc context used by the test server. RPCContext() *rpc.Context @@ -192,7 +192,7 @@ func InitTestServerFactory(impl TestServerFactory) { // The server should be stopped by calling server.Stopper().Stop(). func StartServer( t testing.TB, params base.TestServerArgs, -) (TestServerInterface, *gosql.DB, *client.DB) { +) (TestServerInterface, *gosql.DB, *kv.DB) { server, err := StartServerRaw(params) if err != nil { t.Fatal(err) diff --git a/pkg/ts/db.go b/pkg/ts/db.go index 708123a1a030..06e731926434 100644 --- a/pkg/ts/db.go +++ b/pkg/ts/db.go @@ -15,7 +15,7 @@ import ( "fmt" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -83,7 +83,7 @@ var Resolution30mStorageTTL = settings.RegisterPublicDurationSetting( // DB provides Cockroach's Time Series API. type DB struct { - db *client.DB + db *kv.DB st *cluster.Settings metrics *TimeSeriesMetrics @@ -99,7 +99,7 @@ type DB struct { } // NewDB creates a new DB instance. 
-func NewDB(db *client.DB, settings *cluster.Settings) *DB { +func NewDB(db *kv.DB, settings *cluster.Settings) *DB { pruneThresholdByResolution := map[Resolution]func() int64{ Resolution10s: func() int64 { return Resolution10sStorageTTL.Get(&settings.SV).Nanoseconds() @@ -294,7 +294,7 @@ func (db *DB) tryStoreRollup(ctx context.Context, r Resolution, data []rollupDat } func (db *DB) storeKvs(ctx context.Context, kvs []roachpb.KeyValue) error { - b := &client.Batch{} + b := &kv.Batch{} for _, kv := range kvs { b.AddRawRequest(&roachpb.MergeRequest{ RequestHeader: roachpb.RequestHeader{ diff --git a/pkg/ts/maintenance.go b/pkg/ts/maintenance.go index 42a9622c4923..cc5ecac0c441 100644 --- a/pkg/ts/maintenance.go +++ b/pkg/ts/maintenance.go @@ -13,7 +13,7 @@ package ts import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" @@ -49,7 +49,7 @@ func (tsdb *DB) MaintainTimeSeries( ctx context.Context, snapshot storage.Reader, start, end roachpb.RKey, - db *client.DB, + db *kv.DB, mem *mon.BytesMonitor, budgetBytes int64, now hlc.Timestamp, diff --git a/pkg/ts/pruning.go b/pkg/ts/pruning.go index 35b8cbc78b89..8d39ef162dff 100644 --- a/pkg/ts/pruning.go +++ b/pkg/ts/pruning.go @@ -13,8 +13,8 @@ package ts import ( "context" - "github.com/cockroachdb/cockroach/pkg/internal/client" "github.com/cockroachdb/cockroach/pkg/keys" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -111,11 +111,11 @@ func (tsdb *DB) findTimeSeries( // As range deletion of inline data is an idempotent operation, it is safe to // run this operation concurrently on multiple nodes at the same time. func (tsdb *DB) pruneTimeSeries( - ctx context.Context, db *client.DB, timeSeriesList []timeSeriesResolutionInfo, now hlc.Timestamp, + ctx context.Context, db *kv.DB, timeSeriesList []timeSeriesResolutionInfo, now hlc.Timestamp, ) error { thresholds := tsdb.computeThresholds(now.WallTime) - b := &client.Batch{} + b := &kv.Batch{} for _, timeSeries := range timeSeriesList { // Time series data for a specific resolution falls in a contiguous key // range, and can be deleted with a DelRange command. diff --git a/pkg/ts/query.go b/pkg/ts/query.go index 449c1dba4941..788a9f78fb7b 100644 --- a/pkg/ts/query.go +++ b/pkg/ts/query.go @@ -17,7 +17,7 @@ import ( "sort" "time" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/ts/tspb" "github.com/cockroachdb/cockroach/pkg/util/mon" @@ -549,7 +549,7 @@ func (db *DB) queryChunk( diskTimespan := timespan diskTimespan.expand(mem.InterpolationLimitNanos) - var data []client.KeyValue + var data []kv.KeyValue var err error if len(query.Sources) == 0 { data, err = db.readAllSourcesFromDatabase(ctx, query.Name, diskResolution, diskTimespan) @@ -829,10 +829,10 @@ func (db *DB) readFromDatabase( diskResolution Resolution, timespan QueryTimespan, sources []string, -) ([]client.KeyValue, error) { +) ([]kv.KeyValue, error) { // Iterate over all key timestamps which may contain data for the given // sources, based on the given start/end time and the resolution. 
- b := &client.Batch{} + b := &kv.Batch{} startTimestamp := diskResolution.normalizeToSlab(timespan.StartNanos) kd := diskResolution.SlabDuration() for currentTimestamp := startTimestamp; currentTimestamp <= timespan.EndNanos; currentTimestamp += kd { @@ -844,7 +844,7 @@ func (db *DB) readFromDatabase( if err := db.db.Run(ctx, b); err != nil { return nil, err } - var rows []client.KeyValue + var rows []kv.KeyValue for _, result := range b.Results { row := result.Rows[0] if row.Value == nil { @@ -861,7 +861,7 @@ func (db *DB) readFromDatabase( // keys, rather than by timespan. func (db *DB) readAllSourcesFromDatabase( ctx context.Context, seriesName string, diskResolution Resolution, timespan QueryTimespan, -) ([]client.KeyValue, error) { +) ([]kv.KeyValue, error) { // Based on the supplied timestamps and resolution, construct start and // end keys for a scan that will return every key with data relevant to // the query. Query slightly before and after the actual queried range @@ -872,7 +872,7 @@ func (db *DB) readAllSourcesFromDatabase( endKey := MakeDataKey( seriesName, "" /* source */, diskResolution, timespan.EndNanos, ).PrefixEnd() - b := &client.Batch{} + b := &kv.Batch{} b.Scan(startKey, endKey) if err := db.db.Run(ctx, b); err != nil { @@ -884,7 +884,7 @@ func (db *DB) readAllSourcesFromDatabase( // convertKeysToSpans converts a batch of KeyValues queried from disk into a // map of data spans organized by source. func convertKeysToSpans( - ctx context.Context, data []client.KeyValue, acc *mon.BoundAccount, + ctx context.Context, data []kv.KeyValue, acc *mon.BoundAccount, ) (map[string]timeSeriesSpan, error) { sourceSpans := make(map[string]timeSeriesSpan) for _, row := range data { diff --git a/pkg/ts/rollup.go b/pkg/ts/rollup.go index 93a95ff3a897..b4a970a240b8 100644 --- a/pkg/ts/rollup.go +++ b/pkg/ts/rollup.go @@ -16,7 +16,7 @@ import ( "sort" "unsafe" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/ts/tspb" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -206,7 +206,7 @@ func (db *DB) queryAndComputeRollupsForSpan( rollupDataMap map[string]rollupData, qmc QueryMemoryContext, ) (roachpb.Span, error) { - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = qmc.GetMaxRollupSlabs(series.Resolution) b.Scan(span.Key, span.EndKey) if err := db.db.Run(ctx, b); err != nil { diff --git a/pkg/ts/server.go b/pkg/ts/server.go index 2457159d4938..03d9cf45a715 100644 --- a/pkg/ts/server.go +++ b/pkg/ts/server.go @@ -14,7 +14,7 @@ import ( "context" "math" - "github.com/cockroachdb/cockroach/pkg/internal/client" + "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/ts/tspb" @@ -320,7 +320,7 @@ func (s *Server) Dump(req *tspb.DumpRequest, stream tspb.TimeSeries_DumpServer) } for span != nil { - b := &client.Batch{} + b := &kv.Batch{} b.Header.MaxSpanRequestKeys = dumpBatchSize b.Scan(span.Key, span.EndKey) err := s.db.db.Run(ctx, b)
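
For the batch-based call sites in pkg/ts, the change is equally mechanical: client.Batch and client.KeyValue become kv.Batch and kv.KeyValue while the Scan/Run/Results flow is untouched. The following sketch is illustrative and not part of this diff (scanSeries and its arguments are made up); it is modeled on readFromDatabase and the Dump handler above.

// scanSeries shows the post-rename batch read pattern used by the ts package:
// build a kv.Batch, add a Scan request, run it against the kv.DB, and collect
// the returned kv.KeyValue rows.
package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv" // previously .../pkg/internal/client
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

func scanSeries(ctx context.Context, db *kv.DB, start, end roachpb.Key) ([]kv.KeyValue, error) {
	b := &kv.Batch{} // previously &client.Batch{}
	b.Scan(start, end)
	if err := db.Run(ctx, b); err != nil {
		return nil, err
	}
	var rows []kv.KeyValue // previously []client.KeyValue
	for _, result := range b.Results {
		rows = append(rows, result.Rows...)
	}
	return rows, nil
}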