diff --git a/pkg/ccl/backupccl/backup_intents_test.go b/pkg/ccl/backupccl/backup_intents_test.go index 79282eb9b915..a6f4b7523a83 100644 --- a/pkg/ccl/backupccl/backup_intents_test.go +++ b/pkg/ccl/backupccl/backup_intents_test.go @@ -43,7 +43,7 @@ func TestCleanupIntentsDuringBackupPerformanceRegression(t *testing.T) { // Interceptor catches requests that cleanup transactions of size 1000 which are // test data transactions. All other transaction commits pass though. - interceptor := func(ctx context.Context, req roachpb.BatchRequest) *roachpb.Error { + interceptor := func(ctx context.Context, req *roachpb.BatchRequest) *roachpb.Error { endTxn := req.Requests[0].GetEndTxn() if endTxn != nil && !endTxn.Commit && len(endTxn.LockSpans) == perTransactionRowCount { // If this is a rollback of one the test's SQL transactions, allow the diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index dfc5010fa26f..31370d17512a 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -6170,7 +6170,7 @@ func TestRestoreErrorPropagates(t *testing.T) { jobsTableKey := keys.SystemSQLCodec.TablePrefix(uint32(systemschema.JobsTable.GetID())) var shouldFail, failures int64 params.ServerArgs.Knobs.Store = &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { // Intercept Put and ConditionalPut requests to the jobs table // and, if shouldFail is positive, increment failures and return an // injected error. @@ -6298,7 +6298,7 @@ func TestPaginatedBackupTenant(t *testing.T) { r.EndKey.Equal(r.Key.PrefixEnd()) } params.ServerArgs.Knobs.Store = &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, request roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, request *roachpb.BatchRequest) *roachpb.Error { for _, ru := range request.Requests { if exportRequest, ok := ru.GetInner().(*roachpb.ExportRequest); ok && !isLeasingExportRequest(exportRequest) { @@ -6316,7 +6316,7 @@ func TestPaginatedBackupTenant(t *testing.T) { } return nil }, - TestingResponseFilter: func(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { + TestingResponseFilter: func(ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { for i, ru := range br.Responses { if exportRequest, ok := ba.Requests[i].GetInner().(*roachpb.ExportRequest); ok && !isLeasingExportRequest(exportRequest) { @@ -7156,7 +7156,7 @@ func TestClientDisconnect(t *testing.T) { blockBackupOrRestore(ctx) }}}, Store: &kvserver.StoreTestingKnobs{ - TestingResponseFilter: func(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { + TestingResponseFilter: func(ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { for _, ru := range br.Responses { switch ru.GetInner().(type) { case *roachpb.ExportResponse: diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 3fb5a8c4d5d8..7070cdd97670 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -4830,7 +4830,7 @@ func TestChangefeedProtectedTimestamps(t *testing.T) { } } requestFilter = kvserverbase.ReplicaRequestFilter(func( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) 
*roachpb.Error { if ba.Txn == nil || ba.Txn.Name != "changefeed backfill" { return nil diff --git a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads.go b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads.go index b5d0dd2e7e5a..26715ffc0722 100644 --- a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads.go +++ b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads.go @@ -134,7 +134,7 @@ func canSendToFollower( st *cluster.Settings, clock *hlc.Clock, ctPolicy roachpb.RangeClosedTimestampPolicy, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, ) bool { return kvserver.BatchCanBeEvaluatedOnFollower(ba) && closedTimestampLikelySufficient(st, clock, ctPolicy, ba.RequiredFrontier()) && diff --git a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go index 2f90ec0bd27b..e42f47d21418 100644 --- a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go +++ b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go @@ -108,17 +108,17 @@ func TestCanSendToFollower(t *testing.T) { txn.GlobalUncertaintyLimit = ts return txn } - batch := func(txn *roachpb.Transaction, req roachpb.Request) roachpb.BatchRequest { - var ba roachpb.BatchRequest + batch := func(txn *roachpb.Transaction, req roachpb.Request) *roachpb.BatchRequest { + ba := &roachpb.BatchRequest{} ba.Txn = txn ba.Add(req) return ba } - withBatchTimestamp := func(ba roachpb.BatchRequest, ts hlc.Timestamp) roachpb.BatchRequest { + withBatchTimestamp := func(ba *roachpb.BatchRequest, ts hlc.Timestamp) *roachpb.BatchRequest { ba.Timestamp = ts return ba } - withServerSideBatchTimestamp := func(ba roachpb.BatchRequest, ts hlc.Timestamp) roachpb.BatchRequest { + withServerSideBatchTimestamp := func(ba *roachpb.BatchRequest, ts hlc.Timestamp) *roachpb.BatchRequest { ba = withBatchTimestamp(ba, ts) ba.TimestampFromServerClock = (*hlc.ClockTimestamp)(&ts) return ba @@ -126,7 +126,7 @@ func TestCanSendToFollower(t *testing.T) { testCases := []struct { name string - ba roachpb.BatchRequest + ba *roachpb.BatchRequest ctPolicy roachpb.RangeClosedTimestampPolicy disabledEnterprise bool disabledFollowerReads bool @@ -441,11 +441,13 @@ func TestCanSendToFollower(t *testing.T) { }, { name: "non-enterprise", + ba: withBatchTimestamp(batch(nil, &roachpb.GetRequest{}), stale), disabledEnterprise: true, exp: false, }, { name: "follower reads disabled", + ba: withBatchTimestamp(batch(nil, &roachpb.GetRequest{}), stale), disabledFollowerReads: true, exp: false, }, diff --git a/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go b/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go index b6a730d0fe4b..accac18569e5 100644 --- a/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go +++ b/pkg/ccl/streamingccl/streamingest/stream_ingestion_test.go @@ -151,7 +151,7 @@ func TestStreamIngestionJobWithRandomClient(t *testing.T) { }, } params.ServerArgs.Knobs.Store = &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, req := range ba.Requests { switch r := req.GetInner().(type) { case *roachpb.RevertRangeRequest: diff --git a/pkg/cli/zip_test.go b/pkg/cli/zip_test.go index d745f87154a8..911f7fb2edce 100644 --- a/pkg/cli/zip_test.go +++ b/pkg/cli/zip_test.go @@ -264,7 +264,7 @@ func TestUnavailableZip(t *testing.T) { close(closedCh) unavailableCh.Store(closedCh) knobs := &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, _ 
roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, _ *roachpb.BatchRequest) *roachpb.Error { select { case <-unavailableCh.Load().(chan struct{}): case <-ctx.Done(): diff --git a/pkg/internal/client/requestbatcher/batcher.go b/pkg/internal/client/requestbatcher/batcher.go index 73a725a4c926..90012e3616a2 100644 --- a/pkg/internal/client/requestbatcher/batcher.go +++ b/pkg/internal/client/requestbatcher/batcher.go @@ -526,8 +526,8 @@ func (b *batch) rangeID() roachpb.RangeID { return b.reqs[0].rangeID } -func (b *batch) batchRequest(cfg *Config) roachpb.BatchRequest { - req := roachpb.BatchRequest{ +func (b *batch) batchRequest(cfg *Config) *roachpb.BatchRequest { + req := &roachpb.BatchRequest{ // Preallocate the Requests slice. Requests: make([]roachpb.RequestUnion, 0, len(b.reqs)), } diff --git a/pkg/internal/client/requestbatcher/batcher_test.go b/pkg/internal/client/requestbatcher/batcher_test.go index fa3738a0079c..2ab6be9b40b2 100644 --- a/pkg/internal/client/requestbatcher/batcher_test.go +++ b/pkg/internal/client/requestbatcher/batcher_test.go @@ -37,14 +37,14 @@ type batchResp struct { type batchSend struct { ctx context.Context - ba roachpb.BatchRequest + ba *roachpb.BatchRequest respChan chan<- batchResp } type chanSender chan batchSend func (c chanSender) Send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { respChan := make(chan batchResp, 1) select { @@ -501,7 +501,7 @@ func TestMaxKeysPerBatchReq(t *testing.T) { s := <-sc assert.Equal(t, int64(5), s.ba.MaxSpanRequestKeys) assert.Len(t, s.ba.Requests, 3) - br := makeResp(&s.ba, spanMap{ + br := makeResp(s.ba, spanMap{ {"d", "g"}: {"d", "g"}, {"a", "d"}: {"c", "d"}, {"b", "m"}: {"c", "m"}, @@ -513,7 +513,7 @@ func TestMaxKeysPerBatchReq(t *testing.T) { s = <-sc assert.Equal(t, int64(5), s.ba.MaxSpanRequestKeys) assert.Len(t, s.ba.Requests, 3) - br = makeResp(&s.ba, spanMap{ + br = makeResp(s.ba, spanMap{ {"d", "g"}: {"e", "g"}, {"c", "d"}: nilResumeSpan, {"c", "m"}: {"e", "m"}, @@ -525,7 +525,7 @@ func TestMaxKeysPerBatchReq(t *testing.T) { s = <-sc assert.Equal(t, int64(5), s.ba.MaxSpanRequestKeys) assert.Len(t, s.ba.Requests, 2) - br = makeResp(&s.ba, spanMap{ + br = makeResp(s.ba, spanMap{ {"e", "g"}: nilResumeSpan, {"e", "m"}: {"h", "m"}, }) @@ -536,7 +536,7 @@ func TestMaxKeysPerBatchReq(t *testing.T) { s = <-sc assert.Equal(t, int64(5), s.ba.MaxSpanRequestKeys) assert.Len(t, s.ba.Requests, 1) - br = makeResp(&s.ba, spanMap{ + br = makeResp(s.ba, spanMap{ {"h", "m"}: nilResumeSpan, }) s.respChan <- batchResp{br: br} diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go index 245ec1303aa8..6660feaeb1ef 100644 --- a/pkg/jobs/jobs_test.go +++ b/pkg/jobs/jobs_test.go @@ -2728,7 +2728,7 @@ func TestStartableJobTxnRetry(t *testing.T) { haveInjectedRetry := false params := base.TestServerArgs{} params.Knobs.Store = &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, r roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, r *roachpb.BatchRequest) *roachpb.Error { if r.Txn == nil || r.Txn.Name != txnName { return nil } diff --git a/pkg/kv/bulk/sst_batcher.go b/pkg/kv/bulk/sst_batcher.go index e1bb47c1c188..c5e33a95bb5c 100644 --- a/pkg/kv/bulk/sst_batcher.go +++ b/pkg/kv/bulk/sst_batcher.go @@ -753,7 +753,7 @@ func (b *SSTBatcher) addSSTable( req.SSTTimestampToRequestTimestamp = batchTS } - ba := roachpb.BatchRequest{ + ba := 
&roachpb.BatchRequest{ Header: roachpb.Header{Timestamp: batchTS, ClientRangeInfo: roachpb.ClientRangeInfo{ExplicitlyRequested: true}}, AdmissionHeader: roachpb.AdmissionHeader{ Priority: int32(admissionpb.BulkNormalPri), diff --git a/pkg/kv/client_test.go b/pkg/kv/client_test.go index 59335438b583..72df627c5abe 100644 --- a/pkg/kv/client_test.go +++ b/pkg/kv/client_test.go @@ -762,7 +762,7 @@ func TestReadConsistencyTypes(t *testing.T) { // Mock out DistSender's sender function to check the read consistency for // outgoing BatchRequests and return an empty reply. factory := kv.NonTransactionalFactoryFunc( - func(_ context.Context, ba roachpb.BatchRequest, + func(_ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if ba.ReadConsistency != rc { return nil, roachpb.NewErrorf("BatchRequest has unexpected ReadConsistency %s", ba.ReadConsistency) @@ -908,7 +908,7 @@ func TestNodeIDAndObservedTimestamps(t *testing.T) { // Mock out sender function to check that created transactions // have the observed timestamp set for the configured node ID. factory := kv.MakeMockTxnSenderFactory( - func(_ context.Context, _ *roachpb.Transaction, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(_ context.Context, _ *roachpb.Transaction, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { return ba.CreateReply(), nil }) @@ -1086,7 +1086,7 @@ func TestRollbackWithCanceledContextInsidious(t *testing.T) { key := roachpb.Key("a") ctx, cancel := context.WithCancel(context.Background()) var rollbacks int - storeKnobs.TestingRequestFilter = func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + storeKnobs.TestingRequestFilter = func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if !ba.IsSingleEndTxnRequest() { return nil } diff --git a/pkg/kv/db.go b/pkg/kv/db.go index b9b6ca2080db..d48e8155d758 100644 --- a/pkg/kv/db.go +++ b/pkg/kv/db.go @@ -214,7 +214,7 @@ var _ Sender = &CrossRangeTxnWrapperSender{} // Send implements the Sender interface. func (s *CrossRangeTxnWrapperSender) Send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if ba.Txn != nil { log.Fatalf(ctx, "CrossRangeTxnWrapperSender can't handle transactional requests") @@ -824,7 +824,7 @@ func sendAndFill(ctx context.Context, send SenderFunc, b *Batch) error { // fails. But send() also returns its own errors, so there's some dancing // here to do because we want to run fillResults() so that the individual // result gets initialized with an error from the corresponding call. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Requests = b.reqs ba.Header = b.Header ba.AdmissionHeader = b.AdmissionHeader @@ -965,14 +965,14 @@ func runTxn(ctx context.Context, txn *Txn, retryable func(context.Context, *Txn) // send runs the specified calls synchronously in a single batch and returns // any errors. Returns (nil, nil) for an empty batch. func (db *DB) send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return db.sendUsingSender(ctx, ba, db.NonTransactionalSender()) } // sendUsingSender uses the specified sender to send the batch request. 
func (db *DB) sendUsingSender( - ctx context.Context, ba roachpb.BatchRequest, sender Sender, + ctx context.Context, ba *roachpb.BatchRequest, sender Sender, ) (*roachpb.BatchResponse, *roachpb.Error) { if len(ba.Requests) == 0 { return nil, nil diff --git a/pkg/kv/kvclient/kvcoord/dist_sender.go b/pkg/kv/kvclient/kvcoord/dist_sender.go index 7f154c63d7ea..5794d9c1cd50 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender.go @@ -182,7 +182,7 @@ var CanSendToFollower = func( _ *cluster.Settings, _ *hlc.Clock, _ roachpb.RangeClosedTimestampPolicy, - _ roachpb.BatchRequest, + _ *roachpb.BatchRequest, ) bool { return false } @@ -774,14 +774,11 @@ func unsetCanForwardReadTimestampFlag(ba *roachpb.BatchRequest) { // When the request spans ranges, it is split by range and a partial // subset of the batch request is sent to affected ranges in parallel. func (ds *DistSender) Send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { - ds.incrementBatchCounters(&ba) + ds.incrementBatchCounters(ba) - // TODO(nvanbenschoten): This causes ba to escape to the heap. Either - // commit to passing BatchRequests by reference or return an updated - // value from this method instead. - if pErr := ds.initAndVerifyBatch(ctx, &ba); pErr != nil { + if pErr := ds.initAndVerifyBatch(ctx, ba); pErr != nil { return nil, pErr } @@ -801,7 +798,7 @@ func (ds *DistSender) Send( if ba.Txn != nil && ba.Txn.Epoch > 0 && !require1PC { splitET = true } - parts := splitBatchAndCheckForRefreshSpans(&ba, splitET) + parts := splitBatchAndCheckForRefreshSpans(ba, splitET) if len(parts) > 1 && (ba.MaxSpanRequestKeys != 0 || ba.TargetBytes != 0) { // We already verified above that the batch contains only scan requests of the same type. // Such a batch should never need splitting. @@ -811,10 +808,13 @@ func (ds *DistSender) Send( var singleRplChunk [1]*roachpb.BatchResponse rplChunks := singleRplChunk[:0:1] + onePart := len(parts) == 1 errIdxOffset := 0 for len(parts) > 0 { - part := parts[0] - ba.Requests = part + if !onePart { + ba = ba.ShallowCopy() + ba.Requests = parts[0] + } // The minimal key range encompassing all requests contained within. // Local addressing has already been resolved. // TODO(tschottdorf): consider rudimentary validation of the batch here @@ -850,7 +850,8 @@ func (ds *DistSender) Send( } else if require1PC { log.Fatalf(ctx, "required 1PC transaction cannot be split: %s", ba) } - parts = splitBatchAndCheckForRefreshSpans(&ba, true /* split ET */) + parts = splitBatchAndCheckForRefreshSpans(ba, true /* split ET */) + onePart = false // Restart transaction of the last chunk as multiple parts with // EndTxn in the last part. continue @@ -866,9 +867,11 @@ func (ds *DistSender) Send( // Propagate transaction from last reply to next request. The final // update is taken and put into the response's main header. - ba.UpdateTxn(rpl.Txn) rplChunks = append(rplChunks, rpl) parts = parts[1:] + if len(parts) > 0 { + ba.UpdateTxn(rpl.Txn) + } } var reply *roachpb.BatchResponse @@ -926,7 +929,7 @@ type response struct { // method is never invoked recursively, but it is exposed to maintain symmetry // with divideAndSendBatchToRanges. 
func (ds *DistSender) divideAndSendParallelCommit( - ctx context.Context, ba roachpb.BatchRequest, rs roachpb.RSpan, isReverse bool, batchIdx int, + ctx context.Context, ba *roachpb.BatchRequest, rs roachpb.RSpan, isReverse bool, batchIdx int, ) (br *roachpb.BatchResponse, pErr *roachpb.Error) { // Search backwards, looking for the first pre-commit QueryIntent. swapIdx := -1 @@ -962,7 +965,7 @@ func (ds *DistSender) divideAndSendParallelCommit( // Create a new pre-commit QueryIntent-only batch and issue it // in a non-limited async task. This batch may need to be split // over multiple ranges, so call into divideAndSendBatchToRanges. - qiBa := ba + qiBa := ba.ShallowCopy() qiBa.Requests = swappedReqs[swapIdx+1:] qiRS, err := keys.Range(qiBa.Requests) if err != nil { @@ -971,7 +974,6 @@ func (ds *DistSender) divideAndSendParallelCommit( qiIsReverse := false // QueryIntentRequests do not carry the isReverse flag qiBatchIdx := batchIdx + 1 qiResponseCh := make(chan response, 1) - qiBaCopy := qiBa // avoids escape to heap runTask := ds.rpcContext.Stopper.RunAsyncTask if ds.disableParallelBatches { @@ -1007,6 +1009,7 @@ func (ds *DistSender) divideAndSendParallelCommit( // Adjust the original batch request to ignore the pre-commit // QueryIntent requests. Make sure to determine the request's // new key span. + ba = ba.ShallowCopy() ba.Requests = swappedReqs[:swapIdx+1] rs, err = keys.Range(ba.Requests) if err != nil { @@ -1050,7 +1053,7 @@ func (ds *DistSender) divideAndSendParallelCommit( } // Populate the pre-commit QueryIntent batch response. If we made it // here then we know we can ignore intent missing errors. - qiReply.reply = qiBaCopy.CreateReply() + qiReply.reply = qiBa.CreateReply() for _, ru := range qiReply.reply.Responses { ru.GetQueryIntent().FoundIntent = true } @@ -1091,7 +1094,7 @@ func (ds *DistSender) divideAndSendParallelCommit( func (ds *DistSender) detectIntentMissingDueToIntentResolution( ctx context.Context, txn *roachpb.Transaction, ) (bool, error) { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Timestamp = ds.clock.Now() ba.Add(&roachpb.QueryTxnRequest{ RequestHeader: roachpb.RequestHeader{ @@ -1187,7 +1190,7 @@ func mergeErrors(pErr1, pErr2 *roachpb.Error) *roachpb.Error { // this method is invoked recursively. func (ds *DistSender) divideAndSendBatchToRanges( ctx context.Context, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, rs roachpb.RSpan, isReverse bool, withCommit bool, @@ -1257,7 +1260,7 @@ func (ds *DistSender) divideAndSendBatchToRanges( } } // Make sure the CanForwardReadTimestamp flag is set to false, if necessary. - unsetCanForwardReadTimestampFlag(&ba) + unsetCanForwardReadTimestampFlag(ba) // Make an empty slice of responses which will be populated with responses // as they come in via Combine(). @@ -1371,7 +1374,7 @@ func (ds *DistSender) divideAndSendBatchToRanges( responseCh <- response{pErr: roachpb.NewError(err)} return } - curRangeBatch := ba + curRangeBatch := ba.ShallowCopy() var positions []int curRangeBatch.Requests, positions, seekKey, err = truncationHelper.Truncate(curRangeRS) if len(positions) == 0 && err == nil { @@ -1481,7 +1484,7 @@ func (ds *DistSender) divideAndSendBatchToRanges( // sent. 
func (ds *DistSender) sendPartialBatchAsync( ctx context.Context, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, rs roachpb.RSpan, isReverse bool, withCommit bool, @@ -1513,7 +1516,7 @@ func (ds *DistSender) sendPartialBatchAsync( func slowRangeRPCWarningStr( s *redact.StringBuilder, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, dur time.Duration, attempts int64, desc *roachpb.RangeDescriptor, @@ -1554,7 +1557,7 @@ func slowRangeRPCReturnWarningStr(s *redact.StringBuilder, dur time.Duration, at // the ranges in the span and resend to each. func (ds *DistSender) sendPartialBatch( ctx context.Context, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, rs roachpb.RSpan, isReverse bool, withCommit bool, @@ -1759,7 +1762,7 @@ func (ds *DistSender) deduceRetryEarlyExitError(ctx context.Context) error { // nextKey is the first key that was not processed. This will be used when // filling up the ResumeSpan's. func fillSkippedResponses( - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, br *roachpb.BatchResponse, nextKey roachpb.RKey, resumeReason roachpb.ResumeReason, @@ -1929,7 +1932,7 @@ func noMoreReplicasErr(ambiguousErr, lastAttemptErr error) error { // that do not definitively rule out the possibility that the batch could have // succeeded are transformed into AmbiguousResultErrors. func (ds *DistSender) sendToReplicas( - ctx context.Context, ba roachpb.BatchRequest, routing rangecache.EvictionToken, withCommit bool, + ctx context.Context, ba *roachpb.BatchRequest, routing rangecache.EvictionToken, withCommit bool, ) (*roachpb.BatchResponse, error) { desc := routing.Desc() ba.RangeID = desc.RangeID @@ -2177,7 +2180,7 @@ func (ds *DistSender) sendToReplicas( if ds.kvInterceptor != nil { numReplicas := len(desc.Replicas().Descriptors()) - reqInfo := tenantcostmodel.MakeRequestInfo(&ba, numReplicas) + reqInfo := tenantcostmodel.MakeRequestInfo(ba, numReplicas) respInfo := tenantcostmodel.MakeResponseInfo(br, !reqInfo.IsWrite()) if err := ds.kvInterceptor.OnResponseWait(ctx, reqInfo, respInfo); err != nil { return nil, err diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed_test.go index 131de60eea2e..60894a8e5b8a 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_rangefeed_test.go @@ -109,7 +109,7 @@ func (c *countConnectionsTransport) IsExhausted() bool { } func (c *countConnectionsTransport) SendNext( - ctx context.Context, request roachpb.BatchRequest, + ctx context.Context, request *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { return c.wrapped.SendNext(ctx, request) } diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go index c1d1ef4a0a58..9aa37aefc10f 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go @@ -1230,7 +1230,7 @@ func TestMultiRangeScanDeleteRange(t *testing.T) { txn := kv.NewTxnFromProto(ctx, db, s.NodeID(), now, kv.RootTxn, &txnProto) scan := roachpb.NewScan(writes[0], writes[len(writes)-1].Next(), false) - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txnProto} ba.Add(scan) br, pErr := txn.Send(ctx, ba) @@ -1379,7 +1379,7 @@ func TestMultiRangeScanWithPagination(t *testing.T) { numPages++ // Build the batch. 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} for _, span := range operations { var req roachpb.Request switch { @@ -1921,7 +1921,7 @@ func TestAsyncAbortPoisons(t *testing.T) { var storeKnobs kvserver.StoreTestingKnobs keyA, keyB := roachpb.Key("a"), roachpb.Key("b") commitCh := make(chan error, 1) - storeKnobs.TestingRequestFilter = func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + storeKnobs.TestingRequestFilter = func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, req := range ba.Requests { switch r := req.GetInner().(type) { case *roachpb.EndTxnRequest: @@ -3105,7 +3105,7 @@ func TestTxnCoordSenderRetries(t *testing.T) { // directly. This should be picked up by the transaction's // QueryIntent when chaining on to the pipelined write to // key "a". - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&roachpb.ResolveIntentRequest{ RequestHeader: roachpb.RequestHeader{ Key: roachpb.Key("a"), @@ -3131,7 +3131,7 @@ func TestTxnCoordSenderRetries(t *testing.T) { // Simulate a failed intent write by resolving the intent // directly. This should be picked up by the transaction's // pre-commit QueryIntent for the pipelined write to key "a". - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&roachpb.ResolveIntentRequest{ RequestHeader: roachpb.RequestHeader{ Key: roachpb.Key("a"), @@ -3518,7 +3518,7 @@ func BenchmarkReturnOnRangeBoundary(b *testing.B) { ctx := context.Background() scanCtx := context.WithValue(ctx, scanKey{}, "scan") - reqFilter := func(ctx context.Context, _ roachpb.BatchRequest) *roachpb.Error { + reqFilter := func(ctx context.Context, _ *roachpb.BatchRequest) *roachpb.Error { if ctx.Value(scanKey{}) != nil && Latency > 0 { time.Sleep(Latency) } diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_test.go index 6ef4d8c3a61c..b58f7d0bccd1 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_test.go @@ -119,13 +119,13 @@ var ( var testAddress = util.NewUnresolvedAddr("tcp", "node1") // simpleSendFn is the function type used to dispatch RPC calls in simpleTransportAdapter. -type simpleSendFn func(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, error) +type simpleSendFn func(context.Context, *roachpb.BatchRequest) (*roachpb.BatchResponse, error) // stubRPCSendFn is an rpcSendFn that simply creates a reply for the // BatchRequest without performing an RPC call or triggering any // test instrumentation. 
var stubRPCSendFn simpleSendFn = func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { return ba.CreateReply(), nil } @@ -163,8 +163,9 @@ func (l *simpleTransportAdapter) IsExhausted() bool { } func (l *simpleTransportAdapter) SendNext( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { + ba = ba.ShallowCopy() ba.Replica = l.replicas[l.nextReplicaIdx] l.nextReplicaIdx++ return l.fn(ctx, ba) @@ -375,7 +376,7 @@ func TestSendRPCOrder(t *testing.T) { return nil, err } return adaptSimpleTransport( - func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { return ba.CreateReply(), nil })(opts, dialer, replicas) } @@ -521,7 +522,7 @@ func TestImmutableBatchArgs(t *testing.T) { rpcContext := rpc.NewInsecureTestingContext(ctx, clock, stopper) g := makeGossip(t, stopper, rpcContext) var testFn simpleSendFn = func( - _ context.Context, args roachpb.BatchRequest, + _ context.Context, args *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { reply := args.CreateReply() reply.Txn = args.Txn.Clone() @@ -660,7 +661,7 @@ func TestRetryOnNotLeaseHolderError(t *testing.T) { var retryReplica roachpb.ReplicaDescriptor var testFn simpleSendFn = func( - _ context.Context, args roachpb.BatchRequest, + _ context.Context, args *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { attempts++ reply := &roachpb.BatchResponse{} @@ -744,7 +745,7 @@ func TestBackoffOnNotLeaseHolderErrorDuringTransfer(t *testing.T) { } } var sequences []roachpb.LeaseSequence - var testFn simpleSendFn = func(_ context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var testFn simpleSendFn = func(_ context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { reply := &roachpb.BatchResponse{} if len(sequences) > 0 { seq := sequences[0] @@ -826,7 +827,7 @@ func TestNoBackoffOnNotLeaseHolderErrorFromFollowerRead(t *testing.T) { _ *cluster.Settings, _ *hlc.Clock, _ roachpb.RangeClosedTimestampPolicy, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, ) bool { return true } @@ -836,7 +837,7 @@ func TestNoBackoffOnNotLeaseHolderErrorFromFollowerRead(t *testing.T) { Replica: testUserRangeDescriptor3Replicas.InternalReplicas[1], Sequence: 1, } - testFn := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { sentTo = append(sentTo, ba.Replica.NodeID) br := ba.CreateReply() if ba.Replica != lease.Replica { @@ -909,7 +910,7 @@ func TestNoBackoffOnNotLeaseHolderErrorWithoutLease(t *testing.T) { // n1 and n2 return an NLHE without lease information, n3 returns success. // Record which replicas the request was sent to. var sentTo []roachpb.NodeID - sendFn := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + sendFn := func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { sentTo = append(sentTo, ba.Replica.NodeID) br := ba.CreateReply() if ba.Replica != replicas[2] { @@ -1016,7 +1017,7 @@ func TestDistSenderMovesOnFromReplicaWithStaleLease(t *testing.T) { // replica, which will return a success. 
var callsToNode2 int - sendFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + sendFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { if ba.Replica.NodeID == 2 { callsToNode2++ reply := &roachpb.BatchResponse{} @@ -1139,7 +1140,7 @@ func TestDistSenderIgnoresNLHEBasedOnOldRangeGeneration(t *testing.T) { // routed there. That replica will reply with an NLHE with an old descriptor // generation value, which should make the DistSender try the next replica. var calls []roachpb.NodeID - sendFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + sendFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { calls = append(calls, ba.Replica.NodeID) if ba.Replica.NodeID == 2 { reply := &roachpb.BatchResponse{} @@ -1245,7 +1246,7 @@ func TestDistSenderRetryOnTransportErrors(t *testing.T) { // how transport errors are retried by dist sender. secondReplicaTried := false - sendFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + sendFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { if ba.Replica.NodeID == 2 { return nil, errutil.WithMessage( netutil.NewInitialHeartBeatFailedError( @@ -1333,7 +1334,7 @@ func TestDistSenderDownNodeEvictLeaseholder(t *testing.T) { Sequence: 2, } - transport := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + transport := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { switch ba.Replica.StoreID { case 1: assert.Equal(t, desc.Generation, ba.ClientRangeInfo.DescriptorGeneration) @@ -1383,7 +1384,7 @@ func TestDistSenderDownNodeEvictLeaseholder(t *testing.T) { ClosedTimestampPolicy: roachpb.LEAD_FOR_GLOBAL_READS, }) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = 1 get := &roachpb.GetRequest{} get.Key = roachpb.Key("a") @@ -1470,7 +1471,7 @@ func TestEvictOnFirstRangeGossip(t *testing.T) { g := makeGossip(t, stopper, rpcContext) sender := func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return ba.CreateReply(), nil } @@ -1628,7 +1629,7 @@ func TestEvictCacheOnError(t *testing.T) { ctx, cancel := context.WithCancel(ctx) - var testFn simpleSendFn = func(ctx context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var testFn simpleSendFn = func(ctx context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { if !first { return args.CreateReply(), nil } @@ -1702,7 +1703,7 @@ func TestEvictCacheOnUnknownLeaseHolder(t *testing.T) { } var count int32 - testFn := func(_ context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(_ context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { var err error switch count { case 0, 1: @@ -1769,7 +1770,7 @@ func TestRetryOnWrongReplicaError(t *testing.T) { newRangeDescriptor.EndKey = badEndKey descStale := true - var testFn simpleSendFn = func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var testFn simpleSendFn = func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { rs, err := keys.Range(ba.Requests) if err != nil { t.Fatal(err) @@ -1872,7 +1873,7 @@ func TestRetryOnWrongReplicaErrorWithSuggestion(t *testing.T) { rhsDesc.Generation = staleDesc.Generation 
+ 2 firstLookup := true - var testFn simpleSendFn = func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var testFn simpleSendFn = func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { rs, err := keys.Range(ba.Requests) if err != nil { panic(err) @@ -2039,7 +2040,7 @@ func TestSendRPCRetry(t *testing.T) { descriptor, ) - var testFn simpleSendFn = func(ctx context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var testFn simpleSendFn = func(ctx context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { batchReply := &roachpb.BatchResponse{} reply := &roachpb.ScanResponse{} batchReply.Add(reply) @@ -2160,7 +2161,7 @@ func TestDistSenderDescriptorUpdatesOnSuccessfulRPCs(t *testing.T) { } { t.Run("", func(t *testing.T) { descDB := mockRangeDescriptorDBForDescs(TestMetaRangeDescriptor, desc) - var testFn simpleSendFn = func(ctx context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var testFn simpleSendFn = func(ctx context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { batchReply := &roachpb.BatchResponse{} reply := &roachpb.GetResponse{} batchReply.Add(reply) @@ -2185,7 +2186,7 @@ func TestDistSenderDescriptorUpdatesOnSuccessfulRPCs(t *testing.T) { // Send a request that's going to receive a response with a RangeInfo. k := roachpb.Key("a") get := roachpb.NewGet(k, false /* forUpdate */) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(get) _, pErr := ds.Send(ctx, ba) require.Nil(t, pErr) @@ -2255,7 +2256,7 @@ func TestSendRPCRangeNotFoundError(t *testing.T) { seen := map[roachpb.ReplicaID]struct{}{} var leaseholderStoreID roachpb.StoreID var ds *DistSender - var testFn simpleSendFn = func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var testFn simpleSendFn = func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { br := ba.CreateReply() if _, ok := seen[ba.Replica.ReplicaID]; ok { br.Error = roachpb.NewErrorf("visited replica %+v twice", ba.Replica) @@ -2341,7 +2342,7 @@ func TestMultiRangeGapReverse(t *testing.T) { } sender := kv.SenderFunc( - func(_ context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(_ context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { rb := args.CreateReply() return rb, nil }) @@ -2389,7 +2390,7 @@ func TestMultiRangeGapReverse(t *testing.T) { 1, // coordinatorNodeID ) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Txn = &txn ba.Add(roachpb.NewReverseScan(splits[0], splits[1], false)) ba.Add(roachpb.NewReverseScan(splits[2], splits[3], false)) @@ -2450,7 +2451,7 @@ func TestMultiRangeMergeStaleDescriptor(t *testing.T) { {Key: roachpb.Key("a"), Value: roachpb.MakeValueFromString("1")}, {Key: roachpb.Key("c"), Value: roachpb.MakeValueFromString("2")}, } - testFn := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { rs, err := keys.Range(ba.Requests) if err != nil { t.Fatal(err) @@ -2590,7 +2591,7 @@ func TestClockUpdateOnResponse(t *testing.T) { // Test timestamp propagation on valid BatchResults. 
fakeTime := ds.clock.Now().Add(10000000000 /*10s*/, 0).UnsafeToClockTimestamp() replyNormal := kv.SenderFunc( - func(_ context.Context, args roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(_ context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { rb := args.CreateReply() rb.Now = fakeTime return rb, nil @@ -2600,7 +2601,7 @@ func TestClockUpdateOnResponse(t *testing.T) { // Test timestamp propagation on errors. fakeTime = ds.clock.Now().Add(10000000000 /*10s*/, 0).UnsafeToClockTimestamp() replyError := kv.SenderFunc( - func(_ context.Context, _ roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(_ context.Context, _ *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { pErr := expectedErr pErr.Now = fakeTime return nil, pErr @@ -2675,7 +2676,7 @@ func TestTruncateWithSpanAndDescriptor(t *testing.T) { // requests. Because of parallelization, there's no guarantee // on the ordering of requests. var haveA, haveB bool - sendStub := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + sendStub := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { rs, err := keys.Range(ba.Requests) if err != nil { t.Fatal(err) @@ -2715,7 +2716,7 @@ func TestTruncateWithSpanAndDescriptor(t *testing.T) { // In the second attempt, The range of the descriptor found in // the cache is ["a", "c"), but the put on "a" will not be // present. The request is truncated to contain only the put on "b". - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Txn = &roachpb.Transaction{Name: "test"} { val := roachpb.MakeValueFromString("val") @@ -2803,7 +2804,7 @@ func TestTruncateWithLocalSpanAndDescriptor(t *testing.T) { // Define our rpcSend stub which checks the span of the batch // requests. haveRequest := []bool{false, false, false} - sendStub := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + sendStub := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { h := ba.Requests[0].GetInner().Header() if h.Key.Equal(keys.RangeDescriptorKey(roachpb.RKey("a"))) && h.EndKey.Equal(keys.MakeRangeKeyPrefix(roachpb.RKey("b"))) { haveRequest[0] = true @@ -2842,7 +2843,7 @@ func TestTruncateWithLocalSpanAndDescriptor(t *testing.T) { // In the second attempt, The range of the descriptor found in // the cache is ["b", "d"), The request is truncated to contain // only the scan on local keys that address from "b" to "d". - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Txn = &roachpb.Transaction{Name: "test"} ba.Add(roachpb.NewScan( keys.RangeDescriptorKey(roachpb.RKey("a")), @@ -3003,7 +3004,7 @@ func TestMultiRangeWithEndTxn(t *testing.T) { for i, test := range testCases { var act [][]roachpb.Method - testFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { var cur []roachpb.Method for _, union := range ba.Requests { cur = append(cur, union.GetInner().Method()) @@ -3027,7 +3028,7 @@ func TestMultiRangeWithEndTxn(t *testing.T) { ds.DisableParallelBatches() // Send a batch request containing two puts. 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Txn = &roachpb.Transaction{Name: "test"} ba.Add(roachpb.NewPut(test.put1, roachpb.MakeValueFromString("val1"))) ba.Add(roachpb.NewPut(test.put2, roachpb.MakeValueFromString("val2"))) @@ -3136,7 +3137,7 @@ func TestParallelCommitSplitFromQueryIntents(t *testing.T) { for _, test := range testCases { t.Run(test.name, func(t *testing.T) { var act [][]roachpb.Method - testFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { var cur []roachpb.Method for _, union := range ba.Requests { cur = append(cur, union.GetInner().Method()) @@ -3160,7 +3161,7 @@ func TestParallelCommitSplitFromQueryIntents(t *testing.T) { ds.DisableParallelBatches() // Send a batch request containing the requests. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Txn = &roachpb.Transaction{Name: "test"} ba.Add(test.reqs...) @@ -3247,7 +3248,7 @@ func TestParallelCommitsDetectIntentMissingCause(t *testing.T) { } for _, test := range testCases { t.Run(test.name, func(t *testing.T) { - testFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { br := ba.CreateReply() switch ba.Requests[0].GetInner().Method() { case roachpb.QueryIntent: @@ -3288,7 +3289,7 @@ func TestParallelCommitsDetectIntentMissingCause(t *testing.T) { ds := NewDistSender(cfg) // Send a parallel commit batch request. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Txn = txn.Clone() ba.Add(&roachpb.QueryIntentRequest{ RequestHeader: roachpb.RequestHeader{Key: key}, @@ -3409,7 +3410,7 @@ func TestSenderTransport(t *testing.T) { kv.SenderFunc( func( _ context.Context, - _ roachpb.BatchRequest, + _ *roachpb.BatchRequest, ) (r *roachpb.BatchResponse, e *roachpb.Error) { return }, @@ -3417,7 +3418,7 @@ func TestSenderTransport(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = transport.SendNext(context.Background(), roachpb.BatchRequest{}) + _, err = transport.SendNext(context.Background(), &roachpb.BatchRequest{}) if err != nil { t.Fatal(err) } @@ -3447,7 +3448,7 @@ func TestGatewayNodeID(t *testing.T) { } var observedNodeID roachpb.NodeID - testFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { observedNodeID = ba.Header.GatewayNodeID return ba.CreateReply(), nil } @@ -3464,7 +3465,7 @@ func TestGatewayNodeID(t *testing.T) { Settings: cluster.MakeTestingClusterSettings(), } ds := NewDistSender(cfg) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(roachpb.NewPut(roachpb.Key("a"), roachpb.MakeValueFromString("value"))) if _, err := ds.Send(context.Background(), ba); err != nil { t.Fatalf("put encountered error: %s", err) @@ -3641,7 +3642,7 @@ func TestMultipleErrorsMerged(t *testing.T) { tc.err2 = err1 } - testFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { reply := ba.CreateReply() if delRng := ba.Requests[0].GetDeleteRange(); delRng == nil { return nil, errors.Errorf("expected DeleteRange request, found %v", ba.Requests[0]) @@ -3677,7 +3678,7 @@ func TestMultipleErrorsMerged(t *testing.T) { } ds := 
NewDistSender(cfg) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Txn = txn.Clone() ba.Add(roachpb.NewDeleteRange(roachpb.Key("a"), roachpb.Key("c"), false /* returnKeys */)) @@ -3788,7 +3789,7 @@ func TestErrorIndexAlignment(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) { nthRequest := 0 - var testFn simpleSendFn = func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var testFn simpleSendFn = func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { reply := ba.CreateReply() if nthRequest == tc.nthPartialBatch { reply.Error = roachpb.NewErrorf("foo") @@ -3815,7 +3816,7 @@ func TestErrorIndexAlignment(t *testing.T) { ds := NewDistSender(cfg) ds.DisableParallelBatches() - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Txn = &roachpb.Transaction{Name: "test"} // First batch has 1 request. val := roachpb.MakeValueFromString("val") @@ -3859,7 +3860,7 @@ func TestCanSendToFollower(t *testing.T) { _ *cluster.Settings, _ *hlc.Clock, _ roachpb.RangeClosedTimestampPolicy, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, ) bool { return !ba.IsLocking() && canSend } @@ -3878,7 +3879,7 @@ func TestCanSendToFollower(t *testing.T) { } } var sentTo roachpb.ReplicaDescriptor - testFn := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { sentTo = ba.Replica return ba.CreateReply(), nil } @@ -4022,7 +4023,7 @@ func TestEvictMetaRange(t *testing.T) { isStale := false - testFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { rs, err := keys.Range(ba.Requests) if err != nil { t.Fatal(err) @@ -4204,7 +4205,7 @@ func TestConnectionClass(t *testing.T) { ) (Transport, error) { class = opts.class return adaptSimpleTransport( - func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { return ba.CreateReply(), nil })(opts, dialer, replicas) } @@ -4237,7 +4238,7 @@ func TestConnectionClass(t *testing.T) { keys.SystemSQLCodec.TablePrefix(1234), // A non-system table } { t.Run(key.String(), func(t *testing.T) { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&roachpb.GetRequest{ RequestHeader: roachpb.RequestHeader{ Key: key, @@ -4290,7 +4291,7 @@ func TestEvictionTokenCoalesce(t *testing.T) { var queriedMetaKeys sync.Map var ds *DistSender - testFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + testFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { rs, err := keys.Range(ba.Requests) br := ba.CreateReply() if err != nil { @@ -4388,7 +4389,7 @@ func TestDistSenderSlowLogMessage(t *testing.T) { dur = 8158 * time.Millisecond attempts = 120 ) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} get := &roachpb.GetRequest{} get.Key = roachpb.Key("a") ba.Add(get) @@ -4497,7 +4498,7 @@ func TestRequestSubdivisionAfterDescriptorChange(t *testing.T) { } returnErr := true - transportFn := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + transportFn := func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { if returnErr { // First time around we return an RPC error. 
Next time around, make sure // the DistSender tries gets the split descriptors. @@ -4541,7 +4542,7 @@ func TestRequestSubdivisionAfterDescriptorChange(t *testing.T) { // moment on, we check that the sent batches only consist of single requests - // which proves that the original batch was split. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(tc.req1(keyA), tc.req2(keyC)) // Inconsistent read because otherwise the batch will ask to be re-sent in a // txn when split. @@ -4602,7 +4603,7 @@ func TestRequestSubdivisionAfterDescriptorChangeWithUnavailableReplicasTerminate splitRDB := mockRangeDescriptorDBForDescs(splitDescs...) var numAttempts int32 - transportFn := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + transportFn := func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { atomic.AddInt32(&numAttempts, 1) require.Equal(t, 1, len(ba.Requests)) return nil, newSendError("boom") @@ -4625,7 +4626,7 @@ func TestRequestSubdivisionAfterDescriptorChangeWithUnavailableReplicasTerminate ds := NewDistSender(cfg) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(get(keyA), get(keyC)) // Inconsistent read because otherwise the batch will ask to be re-sent in a // txn when split. @@ -4763,7 +4764,7 @@ func TestDescriptorChangeAfterRequestSubdivision(t *testing.T) { } var successes int32 - transportFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + transportFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { require.Len(t, ba.Requests, 1) switch ba.ClientRangeInfo.DescriptorGeneration { case 1: @@ -4799,7 +4800,7 @@ func TestDescriptorChangeAfterRequestSubdivision(t *testing.T) { // from the cache. Then, we'll switch the descriptor db that the DistSender // uses to the version that returns four ranges. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(tc.req1(keyA), tc.req2(keyE)) // Inconsistent read because otherwise the batch will ask to be re-sent in a // txn when split. @@ -4977,7 +4978,7 @@ func TestSendToReplicasSkipsStaleReplicas(t *testing.T) { tok := rc.MakeEvictionToken(&ent) numCalled := 0 - transportFn := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + transportFn := func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { numCalled++ nlhe := &roachpb.NotLeaseHolderError{ RangeID: tc.initialDesc.RangeID, @@ -5025,7 +5026,7 @@ func TestSendToReplicasSkipsStaleReplicas(t *testing.T) { ds := NewDistSender(cfg) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} get := &roachpb.GetRequest{} get.Key = roachpb.Key("a") ba.Add(get) @@ -5097,7 +5098,7 @@ func TestDistSenderDescEvictionAfterLeaseUpdate(t *testing.T) { // We'll send a request that first gets a NLHE, and then a RangeNotFoundError. We // then expect an updated descriptor to be used and return success. 
call := 0 - var transportFn = func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var transportFn = func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { br := &roachpb.BatchResponse{} switch call { case 0: @@ -5151,7 +5152,7 @@ func TestDistSenderDescEvictionAfterLeaseUpdate(t *testing.T) { } ds := NewDistSender(cfg) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} get := &roachpb.GetRequest{} get.Key = roachpb.Key("a") ba.Add(get) @@ -5188,7 +5189,7 @@ func TestDistSenderRPCMetrics(t *testing.T) { // We'll send a request that first gets a NLHE, and then a ConditionFailedError. call := 0 - var transportFn = func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var transportFn = func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { br := &roachpb.BatchResponse{} if call == 0 { br.Error = roachpb.NewError(&roachpb.NotLeaseHolderError{ @@ -5224,7 +5225,7 @@ func TestDistSenderRPCMetrics(t *testing.T) { Replica: desc.Replicas().Descriptors()[0], }, }) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} get := &roachpb.GetRequest{} get.Key = roachpb.Key("a") ba.Add(get) @@ -5295,7 +5296,7 @@ func TestDistSenderNLHEFromUninitializedReplicaDoesNotCauseUnboundedBackoff(t *t } call := 0 - var transportFn = func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + var transportFn = func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { br := &roachpb.BatchResponse{} switch call { case 0: @@ -5358,7 +5359,7 @@ func TestDistSenderNLHEFromUninitializedReplicaDoesNotCauseUnboundedBackoff(t *t } ds := NewDistSender(cfg) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} get := &roachpb.GetRequest{} get.Key = roachpb.Key("a") ba.Add(get) diff --git a/pkg/kv/kvclient/kvcoord/local_test_cluster_util.go b/pkg/kv/kvclient/kvcoord/local_test_cluster_util.go index 08e860be378f..13414de95a51 100644 --- a/pkg/kv/kvclient/kvcoord/local_test_cluster_util.go +++ b/pkg/kv/kvclient/kvcoord/local_test_cluster_util.go @@ -36,7 +36,7 @@ type localTestClusterTransport struct { } func (l *localTestClusterTransport) SendNext( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { if l.latency > 0 { time.Sleep(l.latency) diff --git a/pkg/kv/kvclient/kvcoord/lock_spans_over_budget_error.go b/pkg/kv/kvclient/kvcoord/lock_spans_over_budget_error.go index d019113394c3..eb9c4ecde885 100644 --- a/pkg/kv/kvclient/kvcoord/lock_spans_over_budget_error.go +++ b/pkg/kv/kvclient/kvcoord/lock_spans_over_budget_error.go @@ -31,7 +31,7 @@ type lockSpansOverBudgetError struct { } func newLockSpansOverBudgetError( - lockSpansBytes, limitBytes int64, ba roachpb.BatchRequest, + lockSpansBytes, limitBytes int64, ba *roachpb.BatchRequest, ) lockSpansOverBudgetError { return lockSpansOverBudgetError{ lockSpansBytes: lockSpansBytes, diff --git a/pkg/kv/kvclient/kvcoord/mocks_generated_test.go b/pkg/kv/kvclient/kvcoord/mocks_generated_test.go index efd75362a2db..8c52fd0bef14 100644 --- a/pkg/kv/kvclient/kvcoord/mocks_generated_test.go +++ b/pkg/kv/kvclient/kvcoord/mocks_generated_test.go @@ -106,7 +106,7 @@ func (mr *MockTransportMockRecorder) Release() *gomock.Call { } // SendNext mocks base method. 
-func (m *MockTransport) SendNext(arg0 context.Context, arg1 roachpb.BatchRequest) (*roachpb.BatchResponse, error) { +func (m *MockTransport) SendNext(arg0 context.Context, arg1 *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SendNext", arg0, arg1) ret0, _ := ret[0].(*roachpb.BatchResponse) diff --git a/pkg/kv/kvclient/kvcoord/replayed_commit_test.go b/pkg/kv/kvclient/kvcoord/replayed_commit_test.go index b9fb2d26db14..08ae5704739c 100644 --- a/pkg/kv/kvclient/kvcoord/replayed_commit_test.go +++ b/pkg/kv/kvclient/kvcoord/replayed_commit_test.go @@ -34,11 +34,11 @@ import ( type interceptingTransport struct { kvcoord.Transport - intercept func(context.Context, roachpb.BatchRequest, *roachpb.BatchResponse, error) (*roachpb.BatchResponse, error) + intercept func(context.Context, *roachpb.BatchRequest, *roachpb.BatchResponse, error) (*roachpb.BatchResponse, error) } func (f *interceptingTransport) SendNext( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { br, err := f.Transport.SendNext(ctx, ba) return f.intercept(ctx, ba, br, err) @@ -75,7 +75,7 @@ func TestCommitSanityCheckAssertionFiresOnUndetectedAmbiguousCommit(t *testing.T } return &interceptingTransport{ Transport: tf, - intercept: func(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, err error) (*roachpb.BatchResponse, error) { + intercept: func(ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse, err error) (*roachpb.BatchResponse, error) { if err != nil || ba.Txn == nil || br.Txn == nil || ba.Txn.Status != roachpb.PENDING || br.Txn.Status != roachpb.COMMITTED || !keys.ScratchRangeMin.Equal(br.Txn.Key) { diff --git a/pkg/kv/kvclient/kvcoord/send_test.go b/pkg/kv/kvclient/kvcoord/send_test.go index 61dcf55d812b..3892ad88c799 100644 --- a/pkg/kv/kvclient/kvcoord/send_test.go +++ b/pkg/kv/kvclient/kvcoord/send_test.go @@ -158,7 +158,7 @@ func (f *firstNErrorTransport) IsExhausted() bool { func (f *firstNErrorTransport) Release() {} func (f *firstNErrorTransport) SendNext( - _ context.Context, _ roachpb.BatchRequest, + _ context.Context, _ *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { var err error if f.numSent < f.numErrors { @@ -378,5 +378,5 @@ func sendBatch( routing, err := ds.getRoutingInfo(ctx, desc.StartKey, rangecache.EvictionToken{}, false /* useReverseScan */) require.NoError(t, err) - return ds.sendToReplicas(ctx, roachpb.BatchRequest{}, routing, false /* withCommit */) + return ds.sendToReplicas(ctx, &roachpb.BatchRequest{}, routing, false /* withCommit */) } diff --git a/pkg/kv/kvclient/kvcoord/transport.go b/pkg/kv/kvclient/kvcoord/transport.go index 68ea2b7ead00..9397793ed432 100644 --- a/pkg/kv/kvclient/kvcoord/transport.go +++ b/pkg/kv/kvclient/kvcoord/transport.go @@ -64,7 +64,7 @@ type Transport interface { // // SendNext is also in charge of importing the remotely collected spans (if // any) into the local trace. - SendNext(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, error) + SendNext(context.Context, *roachpb.BatchRequest) (*roachpb.BatchResponse, error) // NextInternalClient returns the InternalClient to use for making RPC // calls. @@ -178,7 +178,7 @@ func (gt *grpcTransport) IsExhausted() bool { // client is ready. On success, the reply is sent on the channel; // otherwise an error is sent. 
func (gt *grpcTransport) SendNext( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { r := gt.replicas[gt.nextReplicaIdx] iface, err := gt.NextInternalClient(ctx) @@ -195,7 +195,7 @@ func (gt *grpcTransport) sendBatch( ctx context.Context, nodeID roachpb.NodeID, iface rpc.RestrictedInternalClient, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { // Bail out early if the context is already canceled. (GRPC will // detect this pretty quickly, but the first check of the context @@ -208,7 +208,7 @@ func (gt *grpcTransport) sendBatch( if rpc.IsLocal(iface) { gt.opts.metrics.LocalSentCount.Inc(1) } - reply, err := iface.Batch(ctx, &ba) + reply, err := iface.Batch(ctx, ba) // If we queried a remote node, perform extra validation and // import trace spans. if reply != nil && !rpc.IsLocal(iface) { @@ -328,13 +328,14 @@ func (s *senderTransport) IsExhausted() bool { } func (s *senderTransport) SendNext( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { if s.called { panic("called an exhausted transport") } s.called = true + ba = ba.ShallowCopy() ba.Replica = s.replica log.Eventf(ctx, "%v", ba.String()) br, pErr := s.sender.Send(ctx, ba) diff --git a/pkg/kv/kvclient/kvcoord/transport_test.go b/pkg/kv/kvclient/kvcoord/transport_test.go index ec3610569db2..6a1f77292614 100644 --- a/pkg/kv/kvclient/kvcoord/transport_test.go +++ b/pkg/kv/kvclient/kvcoord/transport_test.go @@ -128,7 +128,7 @@ func TestSpanImport(t *testing.T) { server.tr = tracing.SpanFromContext(recCtx).Tracer() - br, err := gt.sendBatch(recCtx, roachpb.NodeID(1), &server, roachpb.BatchRequest{}) + br, err := gt.sendBatch(recCtx, roachpb.NodeID(1), &server, &roachpb.BatchRequest{}) if err != nil { t.Fatal(err) } diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender.go index efc7e0083b02..afa7463bf7ab 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender.go @@ -442,7 +442,7 @@ func generateTxnDeadlineExceededErr( // sendLockedWithElidedEndTxn method, but we would want to confirm // that doing so doesn't cut into the speed-up we see from this fast-path. func (tc *TxnCoordSender) finalizeNonLockingTxnLocked( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) *roachpb.Error { et := ba.Requests[0].GetEndTxn() if et.Commit { @@ -472,7 +472,7 @@ func (tc *TxnCoordSender) finalizeNonLockingTxnLocked( // Send is part of the client.TxnSender interface. func (tc *TxnCoordSender) Send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // NOTE: The locking here is unusual. Although it might look like it, we are // NOT holding the lock continuously for the duration of the Send. We lock @@ -483,7 +483,7 @@ func (tc *TxnCoordSender) Send( defer tc.mu.Unlock() tc.mu.active = true - if pErr := tc.maybeRejectClientLocked(ctx, &ba); pErr != nil { + if pErr := tc.maybeRejectClientLocked(ctx, ba); pErr != nil { return nil, pErr } @@ -852,7 +852,7 @@ func (tc *TxnCoordSender) handleRetryableErrLocked( // cases. It also updates retryable errors with the updated transaction for use // by client restarts. 
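Two details in the transport hunks above are easy to skim past: grpcTransport.sendBatch can now hand ba straight to iface.Batch(ctx, ba) instead of taking the address of a local copy, and senderTransport.SendNext gains ba = ba.ShallowCopy() before stamping ba.Replica, because the batch it receives is now aliased with the caller's. A minimal sketch of that second point; withReplica is an illustrative name only:

```go
// Sketch only; assumes the kvcoord package context and its roachpb import.

// withReplica returns a batch addressed to the given replica without touching
// the caller's batch. Without the ShallowCopy, writing Replica through the
// shared pointer would also be visible to the caller and to any later retry
// that reuses the same *BatchRequest.
func withReplica(ba *roachpb.BatchRequest, r roachpb.ReplicaDescriptor) *roachpb.BatchRequest {
	ba = ba.ShallowCopy()
	ba.Replica = r
	return ba
}
```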
func (tc *TxnCoordSender) updateStateLocked( - ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, pErr *roachpb.Error, + ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse, pErr *roachpb.Error, ) *roachpb.Error { // We handle a couple of different cases: @@ -943,7 +943,7 @@ func (tc *TxnCoordSender) updateStateLocked( func sanityCheckErrWithTxn( ctx context.Context, pErrWithTxn *roachpb.Error, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, knobs *ClientTestingKnobs, ) error { txn := pErrWithTxn.GetTxn() @@ -1325,7 +1325,7 @@ func (tc *TxnCoordSender) ManualRefresh(ctx context.Context) error { // needs the transaction proto. The function then returns a BatchRequest // with the updated transaction proto. We use this updated proto to call // into updateStateLocked directly. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Txn = tc.mu.txn.Clone() const force = true refreshedBa, pErr := tc.interceptorAlloc.txnSpanRefresher.maybeRefreshPreemptivelyLocked(ctx, ba, force) diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go index e41d50aa8e00..69ebb445d53c 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go @@ -344,7 +344,7 @@ func getTxn(ctx context.Context, txn *kv.Txn) (*roachpb.Transaction, *roachpb.Er Txn: txnMeta, } - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Timestamp = txnMeta.WriteTimestamp ba.Add(qt) @@ -477,7 +477,7 @@ func TestTxnCoordSenderCommitCanceled(t *testing.T) { // a txn ID, and the value is a ready channel (chan struct) that will be // closed when the commit has been received and blocked. var blockCommits sync.Map - responseFilter := func(_ context.Context, ba roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { + responseFilter := func(_ context.Context, ba *roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { if arg, ok := ba.GetArg(roachpb.EndTxn); ok && ba.Txn != nil { et := arg.(*roachpb.EndTxnRequest) readyC, ok := blockCommits.Load(ba.Txn.ID) @@ -529,7 +529,7 @@ func TestTxnCoordSenderCommitCanceled(t *testing.T) { // EndTxn(commit=false) async. We instead replicate what Txn.Rollback() would // do here (i.e. send a EndTxn(commit=false)) and assert that we receive the // expected error. 
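The ManualRefresh hunk above preserves a distinction the pointer change makes easier to miss: the BatchRequest itself is now shared by pointer, but the transaction proto attached to it is still a clone (ba.Txn = tc.mu.txn.Clone()), presumably so nothing downstream can write through to the coordinator's authoritative copy. A small sketch of that split; buildRefreshBatch is a made-up name for exposition:

```go
// Sketch only; assumes the kvcoord package context and its roachpb import.

// buildRefreshBatch mirrors the ManualRefresh hunk: share the batch by
// pointer, but attach a cloned Transaction so later mutation of ba.Txn
// cannot reach the caller's proto.
func buildRefreshBatch(txn *roachpb.Transaction) *roachpb.BatchRequest {
	ba := &roachpb.BatchRequest{}
	ba.Txn = txn.Clone()
	return ba
}
```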
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&roachpb.EndTxnRequest{Commit: false}) _, pErr := txn.Send(ctx, ba) require.NotNil(t, pErr) @@ -672,7 +672,7 @@ func TestTxnCoordSenderGCWithAmbiguousResultErr(t *testing.T) { key := roachpb.Key("a") are := roachpb.NewAmbiguousResultErrorf("very ambiguous") knobs := &kvserver.StoreTestingKnobs{ - TestingResponseFilter: func(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { + TestingResponseFilter: func(ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { for _, req := range ba.Requests { if putReq, ok := req.GetInner().(*roachpb.PutRequest); ok && putReq.Key.Equal(key) { return roachpb.NewError(are) @@ -851,7 +851,7 @@ func TestTxnCoordSenderTxnUpdatedOnError(t *testing.T) { clock := hlc.NewClock(manual, 20*time.Nanosecond /* maxOffset */) var senderFn kv.SenderFunc = func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { var reply *roachpb.BatchResponse pErr := test.pErrGen(ba.Txn) @@ -1046,7 +1046,7 @@ func TestTxnCoordSenderNoDuplicateLockSpans(t *testing.T) { var expectedLockSpans []roachpb.Span - var senderFn kv.SenderFunc = func(_ context.Context, ba roachpb.BatchRequest) ( + var senderFn kv.SenderFunc = func(_ context.Context, ba *roachpb.BatchRequest) ( *roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() br.Txn = ba.Txn.Clone() @@ -1587,7 +1587,7 @@ func TestAbortTransactionOnCommitErrors(t *testing.T) { stopper := stop.NewStopper() defer stopper.Stop(ctx) var senderFn kv.SenderFunc = func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() br.Txn = ba.Txn.Clone() @@ -1650,7 +1650,7 @@ type mockSender struct { var _ kv.Sender = &mockSender{} -type matcher func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) +type matcher func(*roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) // match adds a matcher to the list of matchers. func (s *mockSender) match(m matcher) { @@ -1659,7 +1659,7 @@ func (s *mockSender) match(m matcher) { // Send implements the client.Sender interface. func (s *mockSender) Send( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { for _, m := range s.matchers { br, pErr := m(ba) @@ -1697,7 +1697,7 @@ func TestRollbackErrorStopsHeartbeat(t *testing.T) { ) db := kv.NewDB(ambient, factory, clock, stopper) - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.EndTxn); !ok { resp := ba.CreateReply() resp.Txn = ba.Txn @@ -1767,7 +1767,7 @@ func TestOnePCErrorTracking(t *testing.T) { keyA, keyB, keyC := roachpb.Key("a"), roachpb.Key("b"), roachpb.Key("c") // Register a matcher catching the commit attempt. - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if et, ok := ba.GetArg(roachpb.EndTxn); !ok { return nil, nil } else if !et.(*roachpb.EndTxnRequest).Commit { @@ -1776,7 +1776,7 @@ func TestOnePCErrorTracking(t *testing.T) { return nil, roachpb.NewErrorf("injected err") }) // Register a matcher catching the rollback attempt. 
- sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { et, ok := ba.GetArg(roachpb.EndTxn) if !ok { return nil, nil @@ -1840,7 +1840,7 @@ func TestCommitReadOnlyTransaction(t *testing.T) { defer stopper.Stop(ctx) var calls []roachpb.Method - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) return nil, nil }) @@ -1895,7 +1895,7 @@ func TestCommitMutatingTransaction(t *testing.T) { defer stopper.Stop(ctx) var calls []roachpb.Method - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() br.Txn = ba.Txn.Clone() @@ -1998,7 +1998,7 @@ func TestAbortReadOnlyTransaction(t *testing.T) { defer stopper.Stop(ctx) var calls []roachpb.Method - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) return nil, nil }) @@ -2039,7 +2039,7 @@ func TestEndWriteRestartReadOnlyTransaction(t *testing.T) { defer stopper.Stop(ctx) var calls []roachpb.Method - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() br.Txn = ba.Txn.Clone() @@ -2124,7 +2124,7 @@ func TestTransactionKeyNotChangedInRestart(t *testing.T) { keys := []string{"first", "second"} attempt := 0 - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() br.Txn = ba.Txn.Clone() @@ -2190,7 +2190,7 @@ func TestSequenceNumbers(t *testing.T) { defer stopper.Stop(ctx) var expSequence enginepb.TxnSeq - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { for _, ru := range ba.Requests { args := ru.GetInner() if args.Method() == roachpb.QueryIntent { @@ -2221,7 +2221,7 @@ func TestSequenceNumbers(t *testing.T) { txn := kv.NewTxn(ctx, db, 0 /* gatewayNodeID */) for i := 0; i < 5; i++ { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} for j := 0; j < i; j++ { ba.Add(roachpb.NewPut(roachpb.Key("a"), roachpb.MakeValueFromString("foo")).(*roachpb.PutRequest)) } @@ -2244,7 +2244,7 @@ func TestConcurrentTxnRequestsProhibited(t *testing.T) { defer stopper.Stop(ctx) putSync := make(chan struct{}) - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.Put); ok { // Block the Put until the Get runs. 
putSync <- struct{}{} @@ -2324,7 +2324,7 @@ func TestTxnRequestTxnTimestamp(t *testing.T) { {hlc.Timestamp{WallTime: 20, Logical: 1}, hlc.Timestamp{WallTime: 20, Logical: 1}}, } - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { req := requests[curReq] if req.expRequestTS != ba.Txn.WriteTimestamp { return nil, roachpb.NewErrorf("%d: expected ts %s got %s", @@ -2366,7 +2366,7 @@ func TestReadOnlyTxnObeysDeadline(t *testing.T) { stopper := stop.NewStopper() defer stopper.Stop(ctx) - sender.match(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender.match(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.Get); ok { manual.Advance(100) br := ba.CreateReply() @@ -2437,7 +2437,7 @@ func TestTxnCoordSenderPipelining(t *testing.T) { var calls []roachpb.Method var senderFn kv.SenderFunc = func( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) if et, ok := ba.GetArg(roachpb.EndTxn); ok { @@ -2512,7 +2512,7 @@ func TestAnchorKey(t *testing.T) { key2 := roachpb.Key("b") var senderFn kv.SenderFunc = func( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if !roachpb.Key(ba.Txn.Key).Equal(key2) { t.Fatalf("expected anchor %q, got %q", key2, ba.Txn.Key) @@ -2560,7 +2560,7 @@ func TestLeafTxnClientRejectError(t *testing.T) { // where the first one gets a TransactionAbortedError. errKey := roachpb.Key("a") knobs := &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if g, ok := ba.GetArg(roachpb.Get); ok && g.(*roachpb.GetRequest).Key.Equal(errKey) { txn := ba.Txn.Clone() txn.Status = roachpb.ABORTED @@ -2647,7 +2647,7 @@ func TestPutsInStagingTxn(t *testing.T) { var putInStagingSeen bool var storeKnobs kvserver.StoreTestingKnobs - storeKnobs.TestingRequestFilter = func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + storeKnobs.TestingRequestFilter = func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { put, ok := ba.GetArg(roachpb.Put) if !ok || !put.(*roachpb.PutRequest).Key.Equal(keyB) { return nil @@ -2711,7 +2711,7 @@ func TestTxnManualRefresh(t *testing.T) { pErr *roachpb.Error } type req struct { - ba roachpb.BatchRequest + ba *roachpb.BatchRequest respCh chan resp } type testCase struct { @@ -2872,7 +2872,7 @@ func TestTxnManualRefresh(t *testing.T) { defer stopper.Stop(ctx) reqCh := make(chan req) - var senderFn kv.SenderFunc = func(_ context.Context, ba roachpb.BatchRequest) ( + var senderFn kv.SenderFunc = func(_ context.Context, ba *roachpb.BatchRequest) ( *roachpb.BatchResponse, *roachpb.Error) { r := req{ ba: ba, diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go index 2a5b7573476e..cd132876e3ae 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go @@ -121,7 +121,7 @@ type txnCommitter struct { // SendLocked implements the lockedSender interface. 
func (tc *txnCommitter) SendLocked( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // If the batch does not include an EndTxn request, pass it through. rArgs, hasET := ba.GetArg(roachpb.EndTxn) @@ -263,7 +263,7 @@ func (tc *txnCommitter) SendLocked( } // validateEndTxnBatch runs sanity checks on a commit or rollback request. -func (tc *txnCommitter) validateEndTxnBatch(ba roachpb.BatchRequest) error { +func (tc *txnCommitter) validateEndTxnBatch(ba *roachpb.BatchRequest) error { // Check that we don't combine a limited DeleteRange with a commit. We cannot // attempt to run such a batch as a 1PC because, if it gets split and thus // doesn't run as a 1PC, resolving the intents will be very expensive. @@ -292,11 +292,12 @@ func (tc *txnCommitter) validateEndTxnBatch(ba roachpb.BatchRequest) error { // The method is used for read-only transactions, which never need to write a // transaction record. func (tc *txnCommitter) sendLockedWithElidedEndTxn( - ctx context.Context, ba roachpb.BatchRequest, et *roachpb.EndTxnRequest, + ctx context.Context, ba *roachpb.BatchRequest, et *roachpb.EndTxnRequest, ) (br *roachpb.BatchResponse, pErr *roachpb.Error) { // Send the batch without its final request, which we know to be the EndTxn // request that we're eliding. If this would result in us sending an empty // batch, mock out a reply instead of sending anything. + ba = ba.ShallowCopy() ba.Requests = ba.Requests[:len(ba.Requests)-1] if len(ba.Requests) > 0 { br, pErr = tc.wrapped.SendLocked(ctx, ba) @@ -350,7 +351,7 @@ const ( // writes, which all should have corresponding QueryIntent requests in the // batch. func (tc *txnCommitter) canCommitInParallel( - ctx context.Context, ba roachpb.BatchRequest, et *roachpb.EndTxnRequest, etAttempt endTxnAttempt, + ctx context.Context, ba *roachpb.BatchRequest, et *roachpb.EndTxnRequest, etAttempt endTxnAttempt, ) bool { if !parallelCommitsEnabled.Get(&tc.st.SV) { return false @@ -499,7 +500,7 @@ func makeTxnCommitExplicitLocked( txn = txn.Clone() // Construct a new batch with just an EndTxn request. - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn} et := roachpb.EndTxnRequest{Commit: true} et.Key = txn.Key diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer_test.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer_test.go index c0a58c820536..b9c581d9c526 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_committer_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_committer_test.go @@ -57,13 +57,13 @@ func TestTxnCommitterElideEndTxn(t *testing.T) { // Test the case where the EndTxn request is part of a larger batch of // requests. 
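The ShallowCopy added to sendLockedWithElidedEndTxn above is what keeps the slice truncation local: reslicing Requests only rewrites the copy's slice header, so the caller's batch still ends with the elided EndTxn. A condensed sketch of the pattern; dropLastRequest is an illustrative helper name:

```go
// Sketch only; assumes the kvcoord package context and its roachpb import.

// dropLastRequest returns a batch identical to ba minus its final request.
// ShallowCopy forks the header, so shrinking Requests here does not shrink
// the caller's view of the same batch. The backing array is still shared,
// so the remaining entries must be treated as read-only.
func dropLastRequest(ba *roachpb.BatchRequest) *roachpb.BatchRequest {
	ba = ba.ShallowCopy()
	ba.Requests = ba.Requests[:len(ba.Requests)-1]
	return ba
}
```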
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.EndTxnRequest{Commit: commit, LockSpans: nil}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.IsType(t, &roachpb.GetRequest{}, ba.Requests[0].GetInner()) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[1].GetInner()) @@ -86,7 +86,7 @@ func TestTxnCommitterElideEndTxn(t *testing.T) { ba.Requests = nil ba.Add(&roachpb.EndTxnRequest{Commit: commit, LockSpans: nil}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Fail(t, "should not have issued batch request", ba) return nil, nil }) @@ -116,12 +116,12 @@ func TestTxnCommitterAttachesTxnKey(t *testing.T) { intents := []roachpb.Span{{Key: keyA}} // Verify that the txn key is attached to committing EndTxn requests. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.EndTxnRequest{Commit: true, LockSpans: intents}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.Equal(t, keyA, ba.Requests[0].GetInner().Header().Key) require.Equal(t, roachpb.Key(txn.Key), ba.Requests[1].GetInner().Header().Key) @@ -140,7 +140,7 @@ func TestTxnCommitterAttachesTxnKey(t *testing.T) { ba.Requests = nil ba.Add(&roachpb.EndTxnRequest{Commit: false, LockSpans: intents}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.Equal(t, roachpb.Key(txn.Key), ba.Requests[0].GetInner().Header().Key) @@ -176,7 +176,7 @@ func TestTxnCommitterStripsInFlightWrites(t *testing.T) { // Verify that the QueryIntent and the Put are both attached as lock spans // to the committing EndTxn request when expected. 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} qiArgs := roachpb.QueryIntentRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}} putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}} @@ -190,7 +190,7 @@ func TestTxnCommitterStripsInFlightWrites(t *testing.T) { etArgsCopy := etArgs ba.Add(&putArgs, &qiArgs, &etArgsCopy) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[2].GetInner()) @@ -217,7 +217,7 @@ func TestTxnCommitterStripsInFlightWrites(t *testing.T) { etArgsCopy = etArgs ba.Add(&putArgs, &qiArgs, &etArgsCopy) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[2].GetInner()) @@ -247,7 +247,7 @@ func TestTxnCommitterStripsInFlightWrites(t *testing.T) { } ba.Add(&putArgs, &qiArgs, &etArgsWithTrigger) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[2].GetInner()) @@ -278,7 +278,7 @@ func TestTxnCommitterStripsInFlightWrites(t *testing.T) { etArgsWithRangedIntentSpan.InFlightWrites = []roachpb.SequencedWrite{{Key: keyA, Sequence: 1}} ba.Add(&delRngArgs, &qiArgs, &etArgsWithRangedIntentSpan) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[2].GetInner()) @@ -307,7 +307,7 @@ func TestTxnCommitterStripsInFlightWrites(t *testing.T) { etArgsCopy = etArgs ba.Add(&getArgs, &qiArgs, &etArgsCopy) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[2].GetInner()) @@ -343,7 +343,7 @@ func TestTxnCommitterAsyncExplicitCommitTask(t *testing.T) { // Verify that the Put is attached as in-flight write to the committing // EndTxn request. 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}} etArgs := roachpb.EndTxnRequest{Commit: true} @@ -357,7 +357,7 @@ func TestTxnCommitterAsyncExplicitCommitTask(t *testing.T) { ba.Header.CanForwardReadTimestamp = true explicitCommitCh := make(chan struct{}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.True(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -375,7 +375,7 @@ func TestTxnCommitterAsyncExplicitCommitTask(t *testing.T) { // Before returning, mock out the sender again to test against the async // task that should be sent to make the implicit txn commit explicit. - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { defer close(explicitCommitCh) require.Len(t, ba.Requests, 1) require.False(t, ba.CanForwardReadTimestamp) @@ -415,7 +415,7 @@ func TestTxnCommitterRetryAfterStaging(t *testing.T) { txn := makeTxnProto() keyA := roachpb.Key("a") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}} etArgs := roachpb.EndTxnRequest{Commit: true} @@ -424,7 +424,7 @@ func TestTxnCommitterRetryAfterStaging(t *testing.T) { etArgs.InFlightWrites = []roachpb.SequencedWrite{{Key: keyA, Sequence: 1}} ba.Add(&putArgs, &etArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[1].GetInner()) @@ -473,7 +473,7 @@ func TestTxnCommitterNoParallelCommitsOnRetry(t *testing.T) { txn := makeTxnProto() keyA := roachpb.Key("a") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}} etArgs := roachpb.EndTxnRequest{Commit: true} @@ -487,7 +487,7 @@ func TestTxnCommitterNoParallelCommitsOnRetry(t *testing.T) { ba.Add(&putArgs, &etArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[1].GetInner()) diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater.go index b76c0619e250..2aa47c1bb46d 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater.go @@ -182,10 +182,10 @@ func (h *txnHeartbeater) init( // SendLocked is part of the txnInterceptor interface. 
func (h *txnHeartbeater) SendLocked( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { etArg, hasET := ba.GetArg(roachpb.EndTxn) - firstLockingIndex, pErr := firstLockingIndex(&ba) + firstLockingIndex, pErr := firstLockingIndex(ba) if pErr != nil { return nil, pErr } @@ -431,7 +431,7 @@ func (h *txnHeartbeater) heartbeatLocked(ctx context.Context) bool { if txn.Key == nil { log.Fatalf(ctx, "attempting to heartbeat txn without anchor key: %v", txn) } - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Txn = txn ba.Add(&roachpb.HeartbeatTxnRequest{ RequestHeader: roachpb.RequestHeader{ @@ -514,7 +514,7 @@ func (h *txnHeartbeater) abortTxnAsyncLocked(ctx context.Context) { // Construct a batch with an EndTxn request. txn := h.mu.txn.Clone() - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn} ba.Add(&roachpb.EndTxnRequest{ Commit: false, diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater_test.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater_test.go index 46912a702373..1af48edb4c85 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_heartbeater_test.go @@ -74,12 +74,12 @@ func TestTxnHeartbeaterSetsTransactionKey(t *testing.T) { // No key is set on a read-only batch. keyA, keyB := roachpb.Key("a"), roachpb.Key("b") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.Equal(t, keyA, ba.Requests[0].GetInner().Header().Key) require.Equal(t, keyB, ba.Requests[1].GetInner().Header().Key) @@ -102,7 +102,7 @@ func TestTxnHeartbeaterSetsTransactionKey(t *testing.T) { ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.Equal(t, keyB, ba.Requests[0].GetInner().Header().Key) require.Equal(t, keyA, ba.Requests[1].GetInner().Header().Key) @@ -124,7 +124,7 @@ func TestTxnHeartbeaterSetsTransactionKey(t *testing.T) { ba.Requests = nil ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.Equal(t, keyA, ba.Requests[0].GetInner().Header().Key) @@ -157,7 +157,7 @@ func TestTxnHeartbeaterLoopStartedOnFirstLock(t *testing.T) { // Read-only requests don't start the heartbeat loop. 
keyA := roachpb.Key("a") keyAHeader := roachpb.RequestHeader{Key: keyA} - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} ba.Add(&roachpb.GetRequest{RequestHeader: keyAHeader}) @@ -192,7 +192,7 @@ func TestTxnHeartbeaterLoopStartedOnFirstLock(t *testing.T) { ba.Requests = nil ba.Add(&roachpb.EndTxnRequest{Commit: true}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner()) @@ -313,7 +313,7 @@ func TestTxnHeartbeaterLoopStartsBeforeExpiry(t *testing.T) { th.mu.Unlock() count := 0 - mockGatekeeper.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockGatekeeper.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.HeartbeatTxnRequest{}, ba.Requests[0].GetInner()) @@ -339,7 +339,7 @@ func TestTxnHeartbeaterLoopStartsBeforeExpiry(t *testing.T) { // The heartbeat loop is started on the first locking request, in this case // a GetForUpdate request. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} keyA := roachpb.Key("a") keyAHeader := roachpb.RequestHeader{Key: keyA} @@ -404,12 +404,12 @@ func TestTxnHeartbeaterLoopStartedFor1PC(t *testing.T) { defer th.stopper.Stop(ctx) keyA := roachpb.Key("a") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.EndTxnRequest{Commit: true}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[1].GetInner()) @@ -447,7 +447,7 @@ func TestTxnHeartbeaterLoopRequests(t *testing.T) { var count int var lastTime hlc.Timestamp - mockGatekeeper.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockGatekeeper.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.HeartbeatTxnRequest{}, ba.Requests[0].GetInner()) @@ -466,7 +466,7 @@ func TestTxnHeartbeaterLoopRequests(t *testing.T) { // Kick off the heartbeat loop. keyA := roachpb.Key("a") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) @@ -489,7 +489,7 @@ func TestTxnHeartbeaterLoopRequests(t *testing.T) { // Mark the coordinator's transaction record as COMMITTED while a heartbeat // is in-flight. This should cause the heartbeat loop to shut down. 
th.mu.Lock() - mockGatekeeper.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockGatekeeper.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.HeartbeatTxnRequest{}, ba.Requests[0].GetInner()) @@ -531,7 +531,7 @@ func TestTxnHeartbeaterAsyncAbort(t *testing.T) { defer th.stopper.Stop(ctx) putDone, asyncAbortDone := make(chan struct{}), make(chan struct{}) - mockGatekeeper.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockGatekeeper.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { // Wait for the Put to finish to avoid a data race. <-putDone @@ -551,7 +551,7 @@ func TestTxnHeartbeaterAsyncAbort(t *testing.T) { // Kick off the heartbeat loop. keyA := roachpb.Key("a") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) @@ -560,7 +560,7 @@ func TestTxnHeartbeaterAsyncAbort(t *testing.T) { require.NotNil(t, br) // Test that the transaction is rolled back. - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { defer close(asyncAbortDone) require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner()) @@ -606,7 +606,7 @@ func TestTxnHeartbeaterAsyncAbortWaitsForInFlight(t *testing.T) { // putReady then return an aborted txn and signal hbAborted. putReady := make(chan struct{}) hbAborted := make(chan struct{}) - mockGatekeeper.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockGatekeeper.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { <-putReady defer close(hbAborted) @@ -624,7 +624,7 @@ func TestTxnHeartbeaterAsyncAbortWaitsForInFlight(t *testing.T) { mockSender.ChainMockSend( // Mock a Put, which signals putReady and then waits for putResume // before returning a response. - func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { th.mu.Unlock() // without txnLockGatekeeper, we must unlock manually defer th.mu.Lock() close(putReady) @@ -638,7 +638,7 @@ func TestTxnHeartbeaterAsyncAbortWaitsForInFlight(t *testing.T) { return br, nil }, // Mock an EndTxn, which signals rollbackSent. - func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { defer close(rollbackSent) require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner()) @@ -657,7 +657,7 @@ func TestTxnHeartbeaterAsyncAbortWaitsForInFlight(t *testing.T) { // Spawn a goroutine to send the Put. require.NoError(t, th.stopper.RunAsyncTask(ctx, "put", func(ctx context.Context) { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("a")}}) @@ -696,7 +696,7 @@ func TestTxnHeartbeaterAsyncAbortCollapsesRequests(t *testing.T) { // Mock the heartbeat request, which simply aborts and signals hbAborted. 
hbAborted := make(chan struct{}) - mockGatekeeper.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockGatekeeper.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { defer close(hbAborted) require.Len(t, ba.Requests, 1) @@ -714,7 +714,7 @@ func TestTxnHeartbeaterAsyncAbortCollapsesRequests(t *testing.T) { rollbackUnblock := make(chan struct{}) mockSender.ChainMockSend( // The first Put request is expected and should just return. - func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -723,7 +723,7 @@ func TestTxnHeartbeaterAsyncAbortCollapsesRequests(t *testing.T) { return br, nil }, // The first EndTxn request from the heartbeater is expected, so block and return. - func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { th.mu.Unlock() // manually unlock for concurrency, no txnLockGatekeeper defer th.mu.Lock() close(rollbackReady) @@ -744,13 +744,13 @@ func TestTxnHeartbeaterAsyncAbortCollapsesRequests(t *testing.T) { }, // The second EndTxn request from the client is unexpected, so // return an error response. - func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { return nil, roachpb.NewError(errors.Errorf("unexpected request: %v", ba)) }, ) // Kick off the heartbeat loop. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("a")}}) @@ -772,7 +772,7 @@ func TestTxnHeartbeaterAsyncAbortCollapsesRequests(t *testing.T) { close(rollbackUnblock) })) - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} ba.Add(&roachpb.EndTxnRequest{Commit: false}) @@ -814,7 +814,7 @@ func TestTxnHeartbeaterEndTxnLoopHandling(t *testing.T) { // Kick off the heartbeat loop. key := roachpb.Key("a") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: key}}) @@ -824,7 +824,7 @@ func TestTxnHeartbeaterEndTxnLoopHandling(t *testing.T) { require.True(t, heartbeaterRunning(&th), "heartbeat running") // End transaction to validate heartbeat state. - var ba2 roachpb.BatchRequest + ba2 := &roachpb.BatchRequest{} ba2.Header = roachpb.Header{Txn: txn.Clone()} ba2.Add(&roachpb.EndTxnRequest{RequestHeader: roachpb.RequestHeader{Key: key}, Commit: tc.transactionCommit}) diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_metric_recorder.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_metric_recorder.go index 958dc1f485e7..ee2ab945c7ed 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_metric_recorder.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_metric_recorder.go @@ -37,7 +37,7 @@ type txnMetricRecorder struct { // SendLocked is part of the txnInterceptor interface. 
func (m *txnMetricRecorder) SendLocked( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if m.txnStartNanos == 0 { m.txnStartNanos = timeutil.Now().UnixNano() diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go index 87c039cbf837..efd10fe4abcf 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner.go @@ -258,7 +258,7 @@ func (f rangeIteratorFactory) newRangeIterator() condensableSpanSetRangeIterator // SendLocked implements the lockedSender interface. func (tp *txnPipeliner) SendLocked( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // If an EndTxn request is part of this batch, attach the in-flight writes // and the lock footprint to it. @@ -321,7 +321,7 @@ func (tp *txnPipeliner) SendLocked( // the transaction commits. If it fails, then we'd add the lock spans to our // tracking and exceed the budget. It's easier for this code and more // predictable for the user if we just reject this batch, though. -func (tp *txnPipeliner) maybeRejectOverBudget(ba roachpb.BatchRequest, maxBytes int64) error { +func (tp *txnPipeliner) maybeRejectOverBudget(ba *roachpb.BatchRequest, maxBytes int64) error { // Bail early if the current request is not locking, even if we are already // over budget. In particular, we definitely want to permit rollbacks. We also // want to permit lone commits, since the damage in taking too much memory has @@ -354,8 +354,8 @@ func (tp *txnPipeliner) maybeRejectOverBudget(ba roachpb.BatchRequest, maxBytes // provided batch. It augments these sets with locking requests from the current // batch. func (tp *txnPipeliner) attachLocksToEndTxn( - ctx context.Context, ba roachpb.BatchRequest, -) (roachpb.BatchRequest, *roachpb.Error) { + ctx context.Context, ba *roachpb.BatchRequest, +) (*roachpb.BatchRequest, *roachpb.Error) { args, hasET := ba.GetArg(roachpb.EndTxn) if !hasET { return ba, nil @@ -420,7 +420,7 @@ func (tp *txnPipeliner) attachLocksToEndTxn( // canUseAsyncConsensus checks the conditions necessary for this batch to be // allowed to set the AsyncConsensus flag. -func (tp *txnPipeliner) canUseAsyncConsensus(ctx context.Context, ba roachpb.BatchRequest) bool { +func (tp *txnPipeliner) canUseAsyncConsensus(ctx context.Context, ba *roachpb.BatchRequest) bool { // Short-circuit for EndTransactions; it's common enough to have batches // containing a prefix of writes (which, by themselves, are all eligible for // async consensus) and then an EndTxn (which is not eligible). Note that @@ -493,7 +493,7 @@ func (tp *txnPipeliner) canUseAsyncConsensus(ctx context.Context, ba roachpb.Bat // a write succeeded before depending on its existence. We later prune down the // list of writes we proved to exist that are no longer "in-flight" in // updateLockTracking. -func (tp *txnPipeliner) chainToInFlightWrites(ba roachpb.BatchRequest) roachpb.BatchRequest { +func (tp *txnPipeliner) chainToInFlightWrites(ba *roachpb.BatchRequest) *roachpb.BatchRequest { // If there are no in-flight writes, there's nothing to chain to. if tp.ifWrites.len() == 0 { return ba @@ -518,6 +518,7 @@ func (tp *txnPipeliner) chainToInFlightWrites(ba roachpb.BatchRequest) roachpb.B // We don't want to modify the batch's request slice directly, // so fork it before modifying it. 
if !forked { + ba = ba.ShallowCopy() ba.Requests = append([]roachpb.RequestUnion(nil), ba.Requests[:i]...) forked = true } @@ -597,7 +598,7 @@ func (tp *txnPipeliner) chainToInFlightWrites(ba roachpb.BatchRequest) roachpb.B // transaction cleans up. func (tp *txnPipeliner) updateLockTracking( ctx context.Context, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, br *roachpb.BatchResponse, pErr *roachpb.Error, maxBytes int64, @@ -641,7 +642,7 @@ func (tp *txnPipeliner) updateLockTracking( } func (tp *txnPipeliner) updateLockTrackingInner( - ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, pErr *roachpb.Error, + ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse, pErr *roachpb.Error, ) { // If the request failed, add all lock acquisitions attempts directly to the // lock footprint. This reduces the likelihood of dangling locks blocking @@ -658,7 +659,7 @@ func (tp *txnPipeliner) updateLockTrackingInner( // timeout / queue depth limit. In such cases, this optimization prevents // these transactions from adding even more load to the contended key by // trying to perform unnecessary intent resolution. - baStripped := ba + baStripped := *ba if roachpb.ErrPriority(pErr.GoError()) <= roachpb.ErrorScoreUnambiguousError && pErr.Index != nil { baStripped.Requests = make([]roachpb.RequestUnion, len(ba.Requests)-1) copy(baStripped.Requests, ba.Requests[:pErr.Index.Index]) @@ -759,7 +760,7 @@ func (tp *txnPipeliner) stripQueryIntents(br *roachpb.BatchResponse) *roachpb.Ba // It transforms any IntentMissingError into a TransactionRetryError and fixes // the error's index position. func (tp *txnPipeliner) adjustError( - ctx context.Context, ba roachpb.BatchRequest, pErr *roachpb.Error, + ctx context.Context, ba *roachpb.BatchRequest, pErr *roachpb.Error, ) *roachpb.Error { // Fix the error index to hide the impact of any QueryIntent requests. if pErr.Index != nil { diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_client_test.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_client_test.go index 06d3ce43dd72..33cd10f35c31 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_client_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_client_test.go @@ -88,7 +88,7 @@ func TestTxnPipelinerCondenseLockSpans(t *testing.T) { // Check end transaction locks, which should be condensed and split // at range boundaries. expLocks := []roachpb.Span{aToBClosed, cToEClosed, fTog1} - sendFn := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + sendFn := func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { resp := ba.CreateReply() resp.Txn = ba.Txn if req, ok := ba.GetArg(roachpb.EndTxn); ok { diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_test.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_test.go index 933826e2017f..5ba3db1bbe3e 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_pipeliner_test.go @@ -35,11 +35,11 @@ import ( // mock out and adjust the SendLocked method. If no mock function is set, a call // to SendLocked will return the default successful response. 
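The pipeliner hunks above settle on a copy-on-write discipline now that the batch is shared: chainToInFlightWrites shallow-copies the batch and clones the Requests slice only once it actually needs to splice in a QueryIntent (the forked flag), and updateLockTrackingInner strips the failed request out of a dereferenced copy (baStripped := *ba) rather than out of the caller's slice. A compact sketch of the same idea; stripRequestAt is an illustrative helper, not part of the change:

```go
// Sketch only; assumes the kvcoord package context and its roachpb import.

// stripRequestAt returns a batch equal to ba but without the request at
// index i. The header is forked with ShallowCopy and the Requests slice is
// rebuilt rather than edited in place, so the caller's batch is unchanged.
func stripRequestAt(ba *roachpb.BatchRequest, i int) *roachpb.BatchRequest {
	ba = ba.ShallowCopy()
	reqs := make([]roachpb.RequestUnion, 0, len(ba.Requests)-1)
	reqs = append(reqs, ba.Requests[:i]...)
	reqs = append(reqs, ba.Requests[i+1:]...)
	ba.Requests = reqs
	return ba
}
```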
type mockLockedSender struct { - mockFn func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) + mockFn func(*roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) } func (m *mockLockedSender) SendLocked( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if m.mockFn == nil { br := ba.CreateReply() @@ -51,7 +51,7 @@ func (m *mockLockedSender) SendLocked( // MockSend sets the mockLockedSender mocking function. func (m *mockLockedSender) MockSend( - fn func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error), + fn func(*roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error), ) { m.mockFn = fn } @@ -60,12 +60,12 @@ func (m *mockLockedSender) MockSend( // The provided mocking functions are set in the order that they are provided // and a given mocking function is set after the previous one has been called. func (m *mockLockedSender) ChainMockSend( - fns ...func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error), + fns ...func(*roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error), ) { for i := range fns { i := i fn := fns[i] - fns[i] = func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + fns[i] = func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if i < len(fns)-1 { m.mockFn = fns[i+1] } @@ -117,7 +117,7 @@ func TestTxnPipeliner1PCTransaction(t *testing.T) { keyA, keyB := roachpb.Key("a"), roachpb.Key("b") keyC, keyD := roachpb.Key("c"), roachpb.Key("d") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} scanArgs := roachpb.ScanRequest{ RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}, @@ -134,7 +134,7 @@ func TestTxnPipeliner1PCTransaction(t *testing.T) { ba.Add(&delRngArgs) ba.Add(&roachpb.EndTxnRequest{Commit: true}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 4) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.ScanRequest{}, ba.Requests[0].GetInner()) @@ -178,13 +178,13 @@ func TestTxnPipelinerTrackInFlightWrites(t *testing.T) { txn := makeTxnProto() keyA := roachpb.Key("a") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}} putArgs.Sequence = 1 ba.Add(&putArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.True(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -221,7 +221,7 @@ func TestTxnPipelinerTrackInFlightWrites(t *testing.T) { delArgs.Sequence = 5 ba.Add(&delArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 5) require.True(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -274,7 +274,7 @@ func TestTxnPipelinerTrackInFlightWrites(t *testing.T) { etArgs.Sequence = 7 ba.Add(&etArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { 
+ mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 5) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -332,11 +332,11 @@ func TestTxnPipelinerReads(t *testing.T) { keyA, keyC := roachpb.Key("a"), roachpb.Key("c") // Read-only. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.GetRequest{}, ba.Requests[0].GetInner()) @@ -355,7 +355,7 @@ func TestTxnPipelinerReads(t *testing.T) { ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.GetRequest{}, ba.Requests[0].GetInner()) @@ -375,7 +375,7 @@ func TestTxnPipelinerReads(t *testing.T) { ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}) ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -398,7 +398,7 @@ func TestTxnPipelinerReads(t *testing.T) { ba.Requests = nil ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -435,12 +435,12 @@ func TestTxnPipelinerRangedWrites(t *testing.T) { txn := makeTxnProto() keyA, keyD := roachpb.Key("a"), roachpb.Key("d") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyD}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -469,7 +469,7 @@ func TestTxnPipelinerRangedWrites(t *testing.T) { tp.ifWrites.insert(roachpb.Key("e"), 13) require.Equal(t, 5, tp.ifWrites.len()) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 
5) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -519,12 +519,12 @@ func TestTxnPipelinerNonTransactionalRequests(t *testing.T) { txn := makeTxnProto() keyA, keyC := roachpb.Key("a"), roachpb.Key("c") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.True(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -548,7 +548,7 @@ func TestTxnPipelinerNonTransactionalRequests(t *testing.T) { RequestHeader: roachpb.RequestHeader{Key: keyRangeDesc}, }) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -591,7 +591,7 @@ func TestTxnPipelinerManyWrites(t *testing.T) { makeSeq := func(i int) enginepb.TxnSeq { return enginepb.TxnSeq(i) + 1 } txn := makeTxnProto() - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} for i := 0; i < writes; i++ { putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: makeKey(i)}} @@ -599,7 +599,7 @@ func TestTxnPipelinerManyWrites(t *testing.T) { ba.Add(&putArgs) } - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, writes) require.True(t, ba.AsyncConsensus) for i := 0; i < writes; i++ { @@ -625,7 +625,7 @@ func TestTxnPipelinerManyWrites(t *testing.T) { } } - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, writes) require.False(t, ba.AsyncConsensus) for i := 0; i < writes; i++ { @@ -680,13 +680,13 @@ func TestTxnPipelinerTransactionAbort(t *testing.T) { txn := makeTxnProto() keyA := roachpb.Key("a") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}} putArgs.Sequence = 1 ba.Add(&putArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.True(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -712,7 +712,7 @@ func TestTxnPipelinerTransactionAbort(t *testing.T) { etArgs.Sequence = 2 ba.Add(&etArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner()) @@ 
-740,7 +740,7 @@ func TestTxnPipelinerTransactionAbort(t *testing.T) { etArgs.Sequence = 2 ba.Add(&etArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner()) @@ -792,7 +792,7 @@ func TestTxnPipelinerIntentMissingError(t *testing.T) { keyA, keyB := roachpb.Key("a"), roachpb.Key("b") keyC, keyD := roachpb.Key("c"), roachpb.Key("d") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyB, EndKey: keyD}}) @@ -812,7 +812,7 @@ func TestTxnPipelinerIntentMissingError(t *testing.T) { 5: 2, // intent on key "d" missing } { t.Run(fmt.Sprintf("errIdx=%d", errIdx), func(t *testing.T) { - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 7) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -854,13 +854,13 @@ func TestTxnPipelinerEnableDisableMixTxn(t *testing.T) { txn := makeTxnProto() keyA, keyC := roachpb.Key("a"), roachpb.Key("c") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}} putArgs.Sequence = 1 ba.Add(&putArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -886,7 +886,7 @@ func TestTxnPipelinerEnableDisableMixTxn(t *testing.T) { putArgs3.Sequence = 3 ba.Add(&putArgs3) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.True(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -911,7 +911,7 @@ func TestTxnPipelinerEnableDisableMixTxn(t *testing.T) { putArgs4.Sequence = 4 ba.Add(&putArgs4) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -939,7 +939,7 @@ func TestTxnPipelinerEnableDisableMixTxn(t *testing.T) { etArgs.Sequence = 5 ba.Add(&etArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -991,14 +991,14 @@ func TestTxnPipelinerMaxInFlightSize(t *testing.T) { keyC, keyD := roachpb.Key("c"), roachpb.Key("d") 
// Send a batch that would exceed the limit. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyD}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 4) require.False(t, ba.AsyncConsensus) @@ -1020,7 +1020,7 @@ func TestTxnPipelinerMaxInFlightSize(t *testing.T) { ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.True(t, ba.AsyncConsensus) @@ -1038,7 +1038,7 @@ func TestTxnPipelinerMaxInFlightSize(t *testing.T) { ba.Requests = nil ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyD}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.AsyncConsensus) @@ -1058,7 +1058,7 @@ func TestTxnPipelinerMaxInFlightSize(t *testing.T) { ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 4) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -1085,7 +1085,7 @@ func TestTxnPipelinerMaxInFlightSize(t *testing.T) { ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.True(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -1106,7 +1106,7 @@ func TestTxnPipelinerMaxInFlightSize(t *testing.T) { // Send the same batch again. Even though it would prove two in-flight // writes while performing two others, we won't allow it to perform async // consensus because the estimation is conservative. 
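// A compact sketch (not part of the patch) of the converted test pattern that the
// hunks in this file repeat: batches are now built as *roachpb.BatchRequest and the
// mock sender callback receives the batch by pointer. The fragment assumes the
// surrounding test's scaffolding (ctx, t, txn, mockSender, tp).
//
// Before: value batches and value callbacks.
//   var ba roachpb.BatchRequest
//   mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { ... })
// After: pointer batches end to end.
ba := &roachpb.BatchRequest{}
ba.Header = roachpb.Header{Txn: &txn}
ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("a")}})

mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Assert on the batch the interceptor actually sent, then echo a reply back.
	require.True(t, ba.AsyncConsensus)
	br := ba.CreateReply()
	br.Txn = ba.Txn
	return br, nil
})

br, pErr := tp.SendLocked(ctx, ba)
require.Nil(t, pErr)
require.NotNil(t, br)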
- mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 4) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -1137,7 +1137,7 @@ func TestTxnPipelinerMaxInFlightSize(t *testing.T) { ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyD}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 4) require.True(t, ba.AsyncConsensus) @@ -1172,11 +1172,11 @@ func TestTxnPipelinerMaxBatchSize(t *testing.T) { keyA, keyC := roachpb.Key("a"), roachpb.Key("c") // Batch below limit. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.True(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -1196,7 +1196,7 @@ func TestTxnPipelinerMaxBatchSize(t *testing.T) { ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyC}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -1218,7 +1218,7 @@ func TestTxnPipelinerMaxBatchSize(t *testing.T) { pipelinedWritesMaxBatchSize.Override(ctx, &tp.st.SV, 2) // Same batch now below limit. - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.True(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -1250,14 +1250,14 @@ func TestTxnPipelinerRecordsLocksOnFailure(t *testing.T) { // Return an error for a point write, a range write, and a range locking // read. 
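// Sketch of the error-injection idiom the following hunks use (fragment, not part
// of the patch; mockSender, tp, ctx, t, and the batch ba are assumed from the
// surrounding test). Returning a *roachpb.Error from the mock sends the pipeliner
// down its failure path; the test then rolls back and checks that the failed
// batch's lock spans were still recorded for cleanup.
mockPErr := roachpb.NewErrorf("boom")
mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	return nil, mockPErr
})
br, pErr := tp.SendLocked(ctx, ba)
require.Nil(t, br)
require.NotNil(t, pErr)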
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyB, EndKey: keyB.Next()}}) ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyC, EndKey: keyC.Next()}, KeyLocking: lock.Exclusive}) mockPErr := roachpb.NewErrorf("boom") - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -1285,7 +1285,7 @@ func TestTxnPipelinerRecordsLocksOnFailure(t *testing.T) { ba.Add(&roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyE, EndKey: keyE.Next()}}) ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyF, EndKey: keyF.Next()}, KeyLocking: lock.Exclusive}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -1311,7 +1311,7 @@ func TestTxnPipelinerRecordsLocksOnFailure(t *testing.T) { ba.Requests = nil ba.Add(&roachpb.EndTxnRequest{Commit: false}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner()) @@ -1347,7 +1347,7 @@ func TestTxnPipelinerIgnoresLocksOnUnambiguousFailure(t *testing.T) { // Return a ConditionalFailed error for a CPut. The lock spans correspond to // the CPut are not added to the lock footprint, but the lock spans for all // other requests in the batch are. 
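// Sketch of the indexed-error construction that the next hunks exercise (fragment;
// identifiers as in the surrounding test). SetErrorIndex points the error at one
// request in the batch, and the pipeliner drops only that request's lock span from
// its footprint while keeping the spans of the other requests.
condFailedErr := roachpb.NewError(&roachpb.ConditionFailedError{})
condFailedErr.SetErrorIndex(0) // the ConditionalPut at index 0 failed unambiguously
mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	return nil, condFailedErr
})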
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.ConditionalPutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyB, EndKey: keyB.Next()}}) @@ -1355,7 +1355,7 @@ func TestTxnPipelinerIgnoresLocksOnUnambiguousFailure(t *testing.T) { condFailedErr := roachpb.NewError(&roachpb.ConditionFailedError{}) condFailedErr.SetErrorIndex(0) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.ConditionalPutRequest{}, ba.Requests[0].GetInner()) @@ -1385,7 +1385,7 @@ func TestTxnPipelinerIgnoresLocksOnUnambiguousFailure(t *testing.T) { writeIntentErr := roachpb.NewError(&roachpb.WriteIntentError{}) writeIntentErr.SetErrorIndex(2) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.ConditionalPutRequest{}, ba.Requests[0].GetInner()) @@ -1433,10 +1433,10 @@ func TestTxnPipelinerSavepoints(t *testing.T) { // Now verify one of the writes. When we'll rollback to the savepoint below, // we'll check that the verified write stayed verified. txn := makeTxnProto() - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("a")}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.False(t, ba.AsyncConsensus) require.IsType(t, &roachpb.QueryIntentRequest{}, ba.Requests[0].GetInner()) @@ -1514,7 +1514,7 @@ func TestTxnPipelinerCondenseLockSpans2(t *testing.T) { // The budget. maxBytes int64 // The request that the test sends. - req roachpb.BatchRequest + req *roachpb.BatchRequest // The expected state after the request returns. expLockSpans []span expIfWrites []string @@ -1581,7 +1581,7 @@ func TestTxnPipelinerCondenseLockSpans2(t *testing.T) { } txn := makeTxnProto() - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() br.Txn = ba.Txn return br, nil @@ -1615,8 +1615,8 @@ func TestTxnPipelinerCondenseLockSpans2(t *testing.T) { } } -func putBatch(key roachpb.Key, value []byte) roachpb.BatchRequest { - ba := roachpb.BatchRequest{} +func putBatch(key roachpb.Key, value []byte) *roachpb.BatchRequest { + ba := &roachpb.BatchRequest{} ba.Add(&roachpb.PutRequest{ RequestHeader: roachpb.RequestHeader{ Key: key, @@ -1629,7 +1629,7 @@ func putBatch(key roachpb.Key, value []byte) roachpb.BatchRequest { // putBatchNoAsyncConsesnsus returns a PutRequest addressed to the default // replica for the specified key / value. The batch also contains a Get, which // inhibits the asyncConsensus flag. 
-func putBatchNoAsyncConsensus(key roachpb.Key, value []byte) roachpb.BatchRequest { +func putBatchNoAsyncConsensus(key roachpb.Key, value []byte) *roachpb.BatchRequest { ba := putBatch(key, value) ba.Add(&roachpb.GetRequest{ RequestHeader: roachpb.RequestHeader{ @@ -1683,7 +1683,7 @@ func TestTxnPipelinerRejectAboveBudget(t *testing.T) { largeWrite := putBatch(largeAs, nil) mediumWrite := putBatch(largeAs[:5], nil) - delRange := roachpb.BatchRequest{} + delRange := &roachpb.BatchRequest{} delRange.Header.MaxSpanRequestKeys = 1 delRange.Add(&roachpb.DeleteRangeRequest{ RequestHeader: roachpb.RequestHeader{ @@ -1700,7 +1700,7 @@ func TestTxnPipelinerRejectAboveBudget(t *testing.T) { testCases := []struct { name string // The requests to be sent one by one. - reqs []roachpb.BatchRequest + reqs []*roachpb.BatchRequest // The responses for reqs. If an entry is nil, a response is automatically // generated for it. Requests past the end of the resp array are also // generated automatically. @@ -1711,12 +1711,12 @@ func TestTxnPipelinerRejectAboveBudget(t *testing.T) { maxSize int64 }{ {name: "large request", - reqs: []roachpb.BatchRequest{largeWrite}, + reqs: []*roachpb.BatchRequest{largeWrite}, expRejectIdx: 0, maxSize: int64(len(largeAs)) - 1 + roachpb.SpanOverhead, }, {name: "requests that add up", - reqs: []roachpb.BatchRequest{ + reqs: []*roachpb.BatchRequest{ putBatchNoAsyncConsensus(roachpb.Key("aaaa"), nil), putBatchNoAsyncConsensus(roachpb.Key("bbbb"), nil), putBatchNoAsyncConsensus(roachpb.Key("cccc"), nil)}, @@ -1729,7 +1729,7 @@ func TestTxnPipelinerRejectAboveBudget(t *testing.T) { // Like the previous test, but this time the requests run with async // consensus. Being tracked as in-flight writes, this test shows that // in-flight writes count towards the budget. - reqs: []roachpb.BatchRequest{ + reqs: []*roachpb.BatchRequest{ putBatch(roachpb.Key("aaaa"), nil), putBatch(roachpb.Key("bbbb"), nil), putBatch(roachpb.Key("cccc"), nil)}, @@ -1740,7 +1740,7 @@ func TestTxnPipelinerRejectAboveBudget(t *testing.T) { name: "response goes over budget, next request rejected", // A request returns a response with a large resume span, which takes up // the budget. Then the next request will be rejected. - reqs: []roachpb.BatchRequest{delRange, putBatch(roachpb.Key("a"), nil)}, + reqs: []*roachpb.BatchRequest{delRange, putBatch(roachpb.Key("a"), nil)}, resp: []*roachpb.BatchResponse{delRangeResp}, expRejectIdx: 1, maxSize: 10 + roachpb.SpanOverhead, @@ -1750,7 +1750,7 @@ func TestTxnPipelinerRejectAboveBudget(t *testing.T) { // Like the previous test, except here we don't have a followup request // once we're above budget. The test runner will commit the txn, and this // test checks that committing is allowed. - reqs: []roachpb.BatchRequest{delRange}, + reqs: []*roachpb.BatchRequest{delRange}, resp: []*roachpb.BatchResponse{delRangeResp}, expRejectIdx: -1, maxSize: 10 + roachpb.SpanOverhead, @@ -1758,7 +1758,7 @@ func TestTxnPipelinerRejectAboveBudget(t *testing.T) { { // Request keys overlap, so they don't count twice. 
name: "overlapping requests", - reqs: []roachpb.BatchRequest{mediumWrite, mediumWrite, mediumWrite}, + reqs: []*roachpb.BatchRequest{mediumWrite, mediumWrite, mediumWrite}, expRejectIdx: -1, // Our estimation logic for rejecting requests based on size // consults both the in-flight write set (which doesn't account for @@ -1780,7 +1780,7 @@ func TestTxnPipelinerRejectAboveBudget(t *testing.T) { txn := makeTxnProto() var respIdx int - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { // Handle rollbacks and commits separately. if ba.IsSingleAbortTxnRequest() || ba.IsSingleCommitRequest() { br := ba.CreateReply() @@ -1817,7 +1817,7 @@ func TestTxnPipelinerRejectAboveBudget(t *testing.T) { require.Equal(t, int64(1), tp.txnMetrics.TxnsRejectedByLockSpanBudget.Count()) // Make sure rolling back the txn works. - rollback := roachpb.BatchRequest{} + rollback := &roachpb.BatchRequest{} rollback.Add(&roachpb.EndTxnRequest{Commit: false}) rollback.Txn = &txn _, pErr = tp.SendLocked(ctx, rollback) @@ -1831,7 +1831,7 @@ func TestTxnPipelinerRejectAboveBudget(t *testing.T) { // to be over budget and the response surprised us with a large // ResumeSpan). Committing in these situations is allowed, since the // harm has already been done. - commit := roachpb.BatchRequest{} + commit := &roachpb.BatchRequest{} commit.Add(&roachpb.EndTxnRequest{Commit: true}) commit.Txn = &txn _, pErr = tp.SendLocked(ctx, commit) diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go index eb6c0e6c1d43..c867417e11d5 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go @@ -79,7 +79,7 @@ type txnSeqNumAllocator struct { // SendLocked is part of the txnInterceptor interface. func (s *txnSeqNumAllocator) SendLocked( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { for _, ru := range ba.Requests { req := ru.GetInner() diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator_test.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator_test.go index 0beda8bb2bfb..f677450afa2f 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator_test.go @@ -43,12 +43,12 @@ func TestSequenceNumberAllocation(t *testing.T) { keyA, keyB := roachpb.Key("a"), roachpb.Key("b") // Read-only requests are not given unique sequence numbers. 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.Equal(t, enginepb.TxnSeq(0), ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, enginepb.TxnSeq(0), ba.Requests[1].GetInner().Header().Sequence) @@ -69,7 +69,7 @@ func TestSequenceNumberAllocation(t *testing.T) { ba.Add(&roachpb.InitPutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 4) require.Equal(t, enginepb.TxnSeq(1), ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, enginepb.TxnSeq(1), ba.Requests[1].GetInner().Header().Sequence) @@ -91,7 +91,7 @@ func TestSequenceNumberAllocation(t *testing.T) { ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}}) ba.Add(&roachpb.EndTxnRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.Equal(t, enginepb.TxnSeq(3), ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, enginepb.TxnSeq(3), ba.Requests[1].GetInner().Header().Sequence) @@ -133,12 +133,12 @@ func TestSequenceNumberAllocationWithStep(t *testing.T) { t.Run(fmt.Sprintf("step %d", i), func(t *testing.T) { currentStepSeqNum := s.writeSeq - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.Equal(t, currentStepSeqNum, ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, currentStepSeqNum, ba.Requests[1].GetInner().Header().Sequence) @@ -160,7 +160,7 @@ func TestSequenceNumberAllocationWithStep(t *testing.T) { ba.Add(&roachpb.InitPutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 4) require.Equal(t, currentStepSeqNum+1, ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, currentStepSeqNum, ba.Requests[1].GetInner().Header().Sequence) @@ -183,7 +183,7 @@ func TestSequenceNumberAllocationWithStep(t *testing.T) { ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}}) 
ba.Add(&roachpb.EndTxnRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.Equal(t, currentStepSeqNum+3, ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, currentStepSeqNum, ba.Requests[1].GetInner().Header().Sequence) @@ -204,14 +204,14 @@ func TestSequenceNumberAllocationWithStep(t *testing.T) { s.configureSteppingLocked(false /* enabled */) currentStepSeqNum := s.writeSeq - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Requests = nil ba.Add(&roachpb.ConditionalPutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.InitPutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 4) require.Equal(t, currentStepSeqNum+1, ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, currentStepSeqNum+1, ba.Requests[1].GetInner().Header().Sequence) @@ -242,7 +242,7 @@ func TestModifyReadSeqNum(t *testing.T) { keyA := roachpb.Key("a") s.configureSteppingLocked(true /* enabled */) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() br.Txn = ba.Txn return br, nil @@ -262,7 +262,7 @@ func TestModifyReadSeqNum(t *testing.T) { if err := s.stepLocked(ctx); err != nil { t.Fatal(err) } - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) br, pErr := s.SendLocked(ctx, ba) @@ -276,7 +276,7 @@ func TestModifyReadSeqNum(t *testing.T) { if err := s.stepLocked(ctx); err != nil { t.Fatal(err) } - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) br, pErr = s.SendLocked(ctx, ba) @@ -290,10 +290,10 @@ func TestModifyReadSeqNum(t *testing.T) { t.Fatal(err) } s.readSeq = cursorSeqNum - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Equal(t, cursorSeqNum, ba.Requests[0].GetGet().RequestHeader.Sequence) br := ba.CreateReply() br.Txn = ba.Txn @@ -306,10 +306,10 @@ func TestModifyReadSeqNum(t *testing.T) { if err := s.stepLocked(ctx); err != nil { t.Fatal(err) } - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { 
require.Equal(t, curReadSeq, ba.Requests[0].GetGet().RequestHeader.Sequence) br := ba.CreateReply() br.Txn = ba.Txn @@ -321,10 +321,10 @@ func TestModifyReadSeqNum(t *testing.T) { if err := s.stepLocked(ctx); err != nil { t.Fatal(err) } - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Equal(t, s.writeSeq, ba.Requests[0].GetGet().RequestHeader.Sequence) br := ba.CreateReply() br.Txn = ba.Txn @@ -338,10 +338,10 @@ func TestModifyReadSeqNum(t *testing.T) { t.Fatal(err) } s.readSeq = cursorSeqNum - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Equal(t, cursorSeqNum, ba.Requests[0].GetGet().RequestHeader.Sequence) br := ba.CreateReply() br.Txn = ba.Txn @@ -354,10 +354,10 @@ func TestModifyReadSeqNum(t *testing.T) { if err := s.stepLocked(ctx); err != nil { t.Fatal(err) } - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Equal(t, curReadSeq, ba.Requests[0].GetGet().RequestHeader.Sequence) br := ba.CreateReply() br.Txn = ba.Txn @@ -377,12 +377,12 @@ func TestSequenceNumberAllocationTxnRequests(t *testing.T) { txn := makeTxnProto() keyA := roachpb.Key("a") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.HeartbeatTxnRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.EndTxnRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.Equal(t, enginepb.TxnSeq(0), ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, enginepb.TxnSeq(1), ba.Requests[1].GetInner().Header().Sequence) @@ -409,13 +409,13 @@ func TestSequenceNumberAllocationAfterEpochBump(t *testing.T) { keyA, keyB := roachpb.Key("a"), roachpb.Key("b") // Perform a few writes to increase the sequence number counter. 
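// Sketch of the stepped-reads idiom exercised in TestModifyReadSeqNum above
// (fragment, not part of the patch; s, mockSender, ctx, t, txn, and keyA are
// assumed from the file). With stepping enabled, reads carry the read sequence
// frozen at the last step rather than the latest write sequence.
s.configureSteppingLocked(true /* enabled */)
if err := s.stepLocked(ctx); err != nil {
	t.Fatal(err)
}
ba := &roachpb.BatchRequest{}
ba.Header = roachpb.Header{Txn: &txn}
ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}})
mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// The Get's sequence should equal the read seq captured by stepLocked.
	require.Equal(t, s.readSeq, ba.Requests[0].GetGet().RequestHeader.Sequence)
	br := ba.CreateReply()
	br.Txn = ba.Txn
	return br, nil
})
_, pErr := s.SendLocked(ctx, ba)
require.Nil(t, pErr)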
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.ConditionalPutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.InitPutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.Equal(t, enginepb.TxnSeq(1), ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, enginepb.TxnSeq(1), ba.Requests[1].GetInner().Header().Sequence) @@ -441,7 +441,7 @@ func TestSequenceNumberAllocationAfterEpochBump(t *testing.T) { ba.Add(&roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}}) ba.Add(&roachpb.InitPutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 4) require.Equal(t, enginepb.TxnSeq(0), ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, enginepb.TxnSeq(1), ba.Requests[1].GetInner().Header().Sequence) @@ -477,12 +477,12 @@ func TestSequenceNumberAllocationAfterLeafInitialization(t *testing.T) { // Perform a few reads and writes. The sequence numbers assigned should // start at the sequence number provided in the LeafTxnInputState. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.Equal(t, enginepb.TxnSeq(4), ba.Requests[0].GetInner().Header().Sequence) require.Equal(t, enginepb.TxnSeq(4), ba.Requests[1].GetInner().Header().Sequence) @@ -508,12 +508,12 @@ func TestSequenceNumberAllocationSavepoint(t *testing.T) { keyA, keyB := roachpb.Key("a"), roachpb.Key("b") // Perform a few writes to increase the sequence number counter. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() br.Txn = ba.Txn return br, nil diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go index d1a9e1fbe207..d96e356fadfa 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go @@ -138,7 +138,7 @@ type txnSpanRefresher struct { // SendLocked implements the lockedSender interface. 
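// Sketch of the copy-before-mutate rule this file now follows, given that batches
// arrive as shared *roachpb.BatchRequest pointers (illustrative helper, not in the
// patch; lockedSender is the interface from txn_lock_gatekeeper.go, and
// ShallowCopy/UpdateTxn are used exactly as in the hunks below). An interceptor
// that wants to forward a modified batch shallow-copies it first so the caller's
// batch is not changed behind its back.
func forwardWithUpdatedTxn(
	ctx context.Context,
	wrapped lockedSender,
	ba *roachpb.BatchRequest,
	refreshedTxn *roachpb.Transaction,
) (*roachpb.BatchResponse, *roachpb.Error) {
	ba = ba.ShallowCopy()      // don't mutate the caller's batch
	ba.UpdateTxn(refreshedTxn) // e.g. install a refreshed txn proto
	return wrapped.SendLocked(ctx, ba)
}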
func (sr *txnSpanRefresher) SendLocked( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Set the batch's CanForwardReadTimestamp flag. ba.CanForwardReadTimestamp = sr.canForwardReadTimestampWithoutRefresh(ba.Txn) @@ -214,7 +214,7 @@ func (sr *txnSpanRefresher) maybeCondenseRefreshSpans( // catches serializable errors and attempts to avoid them by refreshing the txn // at a larger timestamp. func (sr *txnSpanRefresher) sendLockedWithRefreshAttempts( - ctx context.Context, ba roachpb.BatchRequest, maxRefreshAttempts int, + ctx context.Context, ba *roachpb.BatchRequest, maxRefreshAttempts int, ) (*roachpb.BatchResponse, *roachpb.Error) { if ba.Txn.WriteTooOld { // The WriteTooOld flag is not supposed to be set on requests. It's only set @@ -269,7 +269,7 @@ func (sr *txnSpanRefresher) sendLockedWithRefreshAttempts( log.VEventf(ctx, 2, "not checking error for refresh; refresh attempts exhausted") } } - if err := sr.forwardRefreshTimestampOnResponse(&ba, br, pErr); err != nil { + if err := sr.forwardRefreshTimestampOnResponse(ba, br, pErr); err != nil { return nil, roachpb.NewError(err) } return br, pErr @@ -280,7 +280,7 @@ func (sr *txnSpanRefresher) sendLockedWithRefreshAttempts( // txn timestamp, it recurses into sendLockedWithRefreshAttempts and retries the // batch. If the refresh fails, the input pErr is returned. func (sr *txnSpanRefresher) maybeRefreshAndRetrySend( - ctx context.Context, ba roachpb.BatchRequest, pErr *roachpb.Error, maxRefreshAttempts int, + ctx context.Context, ba *roachpb.BatchRequest, pErr *roachpb.Error, maxRefreshAttempts int, ) (*roachpb.BatchResponse, *roachpb.Error) { txn := pErr.GetTxn() if txn == nil || !sr.canForwardReadTimestamp(txn) { @@ -308,6 +308,7 @@ func (sr *txnSpanRefresher) maybeRefreshAndRetrySend( // We've refreshed all of the read spans successfully and bumped // ba.Txn's timestamps. Attempt the request again. log.Eventf(ctx, "refresh succeeded; retrying original request") + ba = ba.ShallowCopy() ba.UpdateTxn(refreshToTxn) sr.refreshAutoRetries.Inc(1) @@ -342,7 +343,7 @@ func (sr *txnSpanRefresher) maybeRefreshAndRetrySend( // only the EndTxn request. It then issues the two partial batches in order, // stitching their results back together at the end. func (sr *txnSpanRefresher) splitEndTxnAndRetrySend( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // NOTE: call back into SendLocked with each partial batch, not into // sendLockedWithRefreshAttempts. This ensures that we properly set @@ -351,7 +352,7 @@ func (sr *txnSpanRefresher) splitEndTxnAndRetrySend( // Issue a batch up to but not including the EndTxn request. etIdx := len(ba.Requests) - 1 - baPrefix := ba + baPrefix := ba.ShallowCopy() baPrefix.Requests = ba.Requests[:etIdx] brPrefix, pErr := sr.SendLocked(ctx, baPrefix) if pErr != nil { @@ -359,7 +360,7 @@ func (sr *txnSpanRefresher) splitEndTxnAndRetrySend( } // Issue a batch containing only the EndTxn request. - baSuffix := ba + baSuffix := ba.ShallowCopy() baSuffix.Requests = ba.Requests[etIdx:] baSuffix.UpdateTxn(brPrefix.Txn) brSuffix, pErr := sr.SendLocked(ctx, baSuffix) @@ -384,8 +385,8 @@ func (sr *txnSpanRefresher) splitEndTxnAndRetrySend( // If the force flag is true, the refresh will be attempted even if a refresh // is not inevitable. 
func (sr *txnSpanRefresher) maybeRefreshPreemptivelyLocked( - ctx context.Context, ba roachpb.BatchRequest, force bool, -) (roachpb.BatchRequest, *roachpb.Error) { + ctx context.Context, ba *roachpb.BatchRequest, force bool, +) (*roachpb.BatchRequest, *roachpb.Error) { // If we know that the transaction will need a refresh at some point because // its write timestamp has diverged from its read timestamp, consider doing // so preemptively. We perform a preemptive refresh if either a) doing so @@ -445,7 +446,7 @@ func (sr *txnSpanRefresher) maybeRefreshPreemptivelyLocked( // If the transaction cannot change its read timestamp, no refresh is // possible. if !sr.canForwardReadTimestamp(ba.Txn) { - return ba, newRetryErrorOnFailedPreemptiveRefresh(ba.Txn, nil) + return nil, newRetryErrorOnFailedPreemptiveRefresh(ba.Txn, nil) } refreshFrom := ba.Txn.ReadTimestamp @@ -457,10 +458,11 @@ func (sr *txnSpanRefresher) maybeRefreshPreemptivelyLocked( // Try refreshing the txn spans at a timestamp that will allow us to commit. if refreshErr := sr.tryRefreshTxnSpans(ctx, refreshFrom, refreshToTxn); refreshErr != nil { log.Eventf(ctx, "preemptive refresh failed; propagating retry error") - return roachpb.BatchRequest{}, newRetryErrorOnFailedPreemptiveRefresh(ba.Txn, refreshErr) + return nil, newRetryErrorOnFailedPreemptiveRefresh(ba.Txn, refreshErr) } log.Eventf(ctx, "preemptive refresh succeeded") + ba = ba.ShallowCopy() ba.UpdateTxn(refreshToTxn) return ba, nil } @@ -517,7 +519,7 @@ func (sr *txnSpanRefresher) tryRefreshTxnSpans( // Refresh all spans (merge first). // TODO(nvanbenschoten): actually merge spans. - refreshSpanBa := roachpb.BatchRequest{} + refreshSpanBa := &roachpb.BatchRequest{} refreshSpanBa.Txn = refreshToTxn addRefreshes := func(refreshes *condensableSpanSet) { // We're going to check writes between the previous refreshed timestamp, if @@ -564,7 +566,7 @@ func (sr *txnSpanRefresher) tryRefreshTxnSpans( // appendRefreshSpans appends refresh spans from the supplied batch request, // qualified by the batch response where appropriate. func (sr *txnSpanRefresher) appendRefreshSpans( - ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, + ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse, ) error { expLogEnabled := log.ExpensiveLogEnabled(ctx, 3) return ba.RefreshSpanIterate(br, func(span roachpb.Span) { diff --git a/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher_test.go b/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher_test.go index bf61f077b051..bc8fad7575dd 100644 --- a/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher_test.go @@ -55,14 +55,14 @@ func TestTxnSpanRefresherCollectsSpans(t *testing.T) { keyC, keyD := roachpb.Key("c"), roachpb.Key("d") // Basic case. 
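// Sketch of how refresh spans are harvested from a finished batch, mirroring
// appendRefreshSpans in the hunk above (illustrative helper, not in the patch).
// RefreshSpanIterate visits each request's refresh span, qualified by the batch
// response where appropriate, and now returns an error that must be handled.
func collectRefreshSpans(
	ba *roachpb.BatchRequest, br *roachpb.BatchResponse,
) ([]roachpb.Span, error) {
	var spans []roachpb.Span
	if err := ba.RefreshSpanIterate(br, func(span roachpb.Span) {
		spans = append(spans, span)
	}); err != nil {
		return nil, err
	}
	return spans, nil
}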
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} getArgs := roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}} putArgs := roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}} delRangeArgs := roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}} ba.Add(&getArgs, &putArgs, &delRangeArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.True(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.GetRequest{}, ba.Requests[0].GetInner()) @@ -89,7 +89,7 @@ func TestTxnSpanRefresherCollectsSpans(t *testing.T) { scanArgs := roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyB, EndKey: keyD}} ba.Add(&scanArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.ScanRequest{}, ba.Requests[0].GetInner()) @@ -126,7 +126,7 @@ func TestTxnSpanRefresherRefreshesTransactions(t *testing.T) { name string // OnFirstSend, if set, is invoked to evaluate the batch. If not set, pErr() // will be used to provide an error. - onFirstSend func(request roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) + onFirstSend func(*roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) pErr func() *roachpb.Error expRefresh bool expRefreshTS hlc.Timestamp @@ -184,7 +184,7 @@ func TestTxnSpanRefresherRefreshesTransactions(t *testing.T) { }, { name: "write_too_old flag", - onFirstSend: func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onFirstSend: func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() br.Txn = ba.Txn.Clone() br.Txn.WriteTooOld = true @@ -208,7 +208,7 @@ func TestTxnSpanRefresherRefreshesTransactions(t *testing.T) { tsr, mockSender := makeMockTxnSpanRefresher() // Collect some refresh spans. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn.Clone()} // clone txn since it's shared between subtests getArgs := roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}} delRangeArgs := roachpb.DeleteRangeRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}} @@ -223,7 +223,7 @@ func TestTxnSpanRefresherRefreshesTransactions(t *testing.T) { require.Zero(t, tsr.refreshedTimestamp) // Hook up a chain of mocking functions. - onFirstSend := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onFirstSend := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -235,7 +235,7 @@ func TestTxnSpanRefresherRefreshesTransactions(t *testing.T) { pErr.SetTxn(ba.Txn) return nil, pErr } - onSecondSend := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onSecondSend := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { // Should not be called if !expRefresh. 
require.True(t, tc.expRefresh) @@ -247,7 +247,7 @@ func TestTxnSpanRefresherRefreshesTransactions(t *testing.T) { br.Txn = ba.Txn return br, nil } - onRefresh := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onRefresh := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { // Should not be called if !expRefresh. require.True(t, tc.expRefresh) @@ -315,7 +315,7 @@ func TestTxnSpanRefresherMaxRefreshAttempts(t *testing.T) { tsr.knobs.MaxTxnRefreshAttempts = 2 // Collect some refresh spans. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} scanArgs := roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}} ba.Add(&scanArgs) @@ -328,7 +328,7 @@ func TestTxnSpanRefresherMaxRefreshAttempts(t *testing.T) { require.Zero(t, tsr.refreshedTimestamp) // Hook up a chain of mocking functions. - onPut := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onPut := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -337,7 +337,7 @@ func TestTxnSpanRefresherMaxRefreshAttempts(t *testing.T) { roachpb.NewTransactionRetryError(roachpb.RETRY_SERIALIZABLE, ""), ba.Txn) } refreshes := 0 - onRefresh := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onRefresh := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { refreshes++ require.Len(t, ba.Requests, 1) require.Equal(t, txn.WriteTimestamp, ba.Txn.ReadTimestamp) @@ -351,7 +351,7 @@ func TestTxnSpanRefresherMaxRefreshAttempts(t *testing.T) { br.Txn = ba.Txn return br, nil } - unexpected := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + unexpected := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Fail(t, "unexpected") return nil, nil } @@ -393,12 +393,12 @@ func TestTxnSpanRefresherPreemptiveRefresh(t *testing.T) { // Send an EndTxn request that will need a refresh to succeed. Because // no refresh spans have been recorded, the preemptive refresh should be // free, so the txnSpanRefresher should do so. 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} etArgs := roachpb.EndTxnRequest{Commit: true} ba.Add(&etArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.True(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner()) @@ -432,7 +432,7 @@ func TestTxnSpanRefresherPreemptiveRefresh(t *testing.T) { scanArgs := roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}} ba.Add(&scanArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.True(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.ScanRequest{}, ba.Requests[0].GetInner()) @@ -468,7 +468,7 @@ func TestTxnSpanRefresherPreemptiveRefresh(t *testing.T) { ba.Requests = nil ba.Add(&etArgs) - onRefresh := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onRefresh := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.Equal(t, pushedWriteTs, ba.Txn.ReadTimestamp) require.IsType(t, &roachpb.RefreshRangeRequest{}, ba.Requests[0].GetInner()) @@ -480,7 +480,7 @@ func TestTxnSpanRefresherPreemptiveRefresh(t *testing.T) { return nil, roachpb.NewError(roachpb.NewRefreshFailedError( roachpb.RefreshFailedError_REASON_COMMITTED_VALUE, roachpb.Key("a"), hlc.Timestamp{WallTime: 1})) } - unexpected := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + unexpected := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Fail(t, "unexpected") return nil, nil } @@ -499,7 +499,7 @@ func TestTxnSpanRefresherPreemptiveRefresh(t *testing.T) { require.False(t, tsr.refreshInvalid) // Try again, but this time let the refresh succeed. - onRefresh = func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onRefresh = func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.Equal(t, pushedWriteTs, ba.Txn.ReadTimestamp) require.IsType(t, &roachpb.RefreshRangeRequest{}, ba.Requests[0].GetInner()) @@ -512,7 +512,7 @@ func TestTxnSpanRefresherPreemptiveRefresh(t *testing.T) { br.Txn = ba.Txn return br, nil } - onEndTxn := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onEndTxn := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.EndTxnRequest{}, ba.Requests[0].GetInner()) @@ -560,7 +560,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { // refresh spans due to a Scan. When priorReads is false, issue a {Scan, // Put, EndTxn} batch with no previously accumulated refresh spans. testutils.RunTrueAndFalse(t, "prior_reads", func(t *testing.T, priorReads bool) { - var mockFns []func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) + var mockFns []func(*roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) if priorReads { // Hook up a chain of mocking functions. Expected order of requests: // 1. 
{Put, EndTxn} -> retry error with pushed timestamp @@ -568,7 +568,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { // 3. {Put} -> successful with pushed timestamp // 4. {Refresh} -> successful // 5. {EndTxn} -> successful - onPutAndEndTxn := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onPutAndEndTxn := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.False(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -579,7 +579,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { return nil, roachpb.NewErrorWithTxn( roachpb.NewTransactionRetryError(roachpb.RETRY_SERIALIZABLE, ""), pushedTxn) } - onRefresh1 := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onRefresh1 := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.Equal(t, pushedTs1, ba.Txn.ReadTimestamp) require.IsType(t, &roachpb.RefreshRangeRequest{}, ba.Requests[0].GetInner()) @@ -592,7 +592,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { br.Txn = ba.Txn return br, nil } - onPut := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onPut := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.CanForwardReadTimestamp) require.Equal(t, pushedTs1, ba.Txn.ReadTimestamp) @@ -603,7 +603,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { br.Txn.WriteTimestamp = pushedTs2 return br, nil } - onRefresh2 := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onRefresh2 := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.Equal(t, pushedTs2, ba.Txn.ReadTimestamp) require.IsType(t, &roachpb.RefreshRangeRequest{}, ba.Requests[0].GetInner()) @@ -616,7 +616,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { br.Txn = ba.Txn return br, nil } - onEndTxn := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onEndTxn := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.CanForwardReadTimestamp) require.Equal(t, pushedTs2, ba.Txn.ReadTimestamp) @@ -634,7 +634,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { // 3. {Scan, Put} -> successful with pushed timestamp // 4. {Refresh} -> successful // 5. 
{EndTxn} -> successful - onScanPutAndEndTxn := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onScanPutAndEndTxn := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 3) require.True(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.ScanRequest{}, ba.Requests[0].GetInner()) @@ -646,7 +646,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { return nil, roachpb.NewErrorWithTxn( roachpb.NewTransactionRetryError(roachpb.RETRY_SERIALIZABLE, ""), pushedTxn) } - onScanAndPut := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onScanAndPut := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 2) require.True(t, ba.CanForwardReadTimestamp) require.Equal(t, pushedTs1, ba.Txn.ReadTimestamp) @@ -658,7 +658,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { br.Txn.WriteTimestamp = pushedTs2 return br, nil } - onRefresh := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onRefresh := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.Equal(t, pushedTs2, ba.Txn.ReadTimestamp) require.IsType(t, &roachpb.RefreshRangeRequest{}, ba.Requests[0].GetInner()) @@ -671,7 +671,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { br.Txn = ba.Txn return br, nil } - onEndTxn := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + onEndTxn := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) // IMPORTANT! CanForwardReadTimestamp should no longer be set // for EndTxn batch, because the Scan in the earlier batch needs @@ -700,7 +700,7 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { ctx := context.Background() tsr, mockSender := makeMockTxnSpanRefresher() - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} if priorReads { // Collect some refresh spans first. ba.Header = roachpb.Header{Txn: &txn} @@ -726,16 +726,16 @@ func TestTxnSpanRefresherSplitEndTxnOnAutoRetry(t *testing.T) { // Construct the mock sender chain, injecting an error where // appropriate. Make a copy of mockFns to avoid sharing state // between subtests. - mockFnsCpy := append([]func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)(nil), mockFns...) + mockFnsCpy := append([]func(*roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)(nil), mockFns...) if errIdx < len(mockFnsCpy) { errFn := mockFnsCpy[errIdx] - newErrFn := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + newErrFn := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { _, _ = errFn(ba) return nil, roachpb.NewErrorf("error") } mockFnsCpy[errIdx] = newErrFn } - unexpected := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + unexpected := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Fail(t, "unexpected") return nil, nil } @@ -866,7 +866,7 @@ func TestTxnSpanRefresherMaxTxnRefreshSpansBytes(t *testing.T) { MaxTxnRefreshSpansBytes.Override(ctx, &tsr.st.SV, 3+roachpb.SpanOverhead) // Send a batch below the limit. 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} scanArgs := roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}} ba.Add(&scanArgs) @@ -916,7 +916,7 @@ func TestTxnSpanRefresherMaxTxnRefreshSpansBytes(t *testing.T) { // Return a transaction retry error and make sure the metric indicating that // we did not retry due to the refresh span bytes is incremented. - mockSender.MockSend(func(request roachpb.BatchRequest) (batchResponse *roachpb.BatchResponse, r *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (batchResponse *roachpb.BatchResponse, r *roachpb.Error) { return nil, roachpb.NewErrorWithTxn( roachpb.NewTransactionRetryError(roachpb.RETRY_SERIALIZABLE, ""), ba.Txn) }) @@ -943,11 +943,11 @@ func TestTxnSpanRefresherAssignsCanForwardReadTimestamp(t *testing.T) { // Send a Put request. Should set CanForwardReadTimestamp flag. Should not // collect refresh spans. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.True(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -967,11 +967,11 @@ func TestTxnSpanRefresherAssignsCanForwardReadTimestamp(t *testing.T) { // Should NOT set CanForwardReadTimestamp flag. txnFixed := txn.Clone() txnFixed.CommitTimestampFixed = true - var baFixed roachpb.BatchRequest + baFixed := &roachpb.BatchRequest{} baFixed.Header = roachpb.Header{Txn: txnFixed} baFixed.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyA}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -993,7 +993,7 @@ func TestTxnSpanRefresherAssignsCanForwardReadTimestamp(t *testing.T) { scanArgs := roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}} ba.Add(&scanArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.True(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.ScanRequest{}, ba.Requests[0].GetInner()) @@ -1014,7 +1014,7 @@ func TestTxnSpanRefresherAssignsCanForwardReadTimestamp(t *testing.T) { scanArgs2 := roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyC, EndKey: keyD}} ba.Add(&scanArgs2) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.ScanRequest{}, ba.Requests[0].GetInner()) @@ -1034,7 +1034,7 @@ func TestTxnSpanRefresherAssignsCanForwardReadTimestamp(t *testing.T) { ba.Requests = nil ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) 
(*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.False(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -1055,7 +1055,7 @@ func TestTxnSpanRefresherAssignsCanForwardReadTimestamp(t *testing.T) { ba.Requests = nil ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: keyB}}) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.True(t, ba.CanForwardReadTimestamp) require.IsType(t, &roachpb.PutRequest{}, ba.Requests[0].GetInner()) @@ -1091,7 +1091,7 @@ func TestTxnSpanRefresherEpochIncrement(t *testing.T) { MaxTxnRefreshSpansBytes.Override(ctx, &tsr.st.SV, 3+roachpb.SpanOverhead) // Send a batch below the limit. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} scanArgs := roachpb.ScanRequest{RequestHeader: roachpb.RequestHeader{Key: keyA, EndKey: keyB}} ba.Add(&scanArgs) @@ -1146,11 +1146,11 @@ func TestTxnSpanRefresherSavepoint(t *testing.T) { txn := makeTxnProto() read := func(key roachpb.Key) { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} getArgs := roachpb.GetRequest{RequestHeader: roachpb.RequestHeader{Key: key}} ba.Add(&getArgs) - mockSender.MockSend(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + mockSender.MockSend(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { require.Len(t, ba.Requests, 1) require.IsType(t, &roachpb.GetRequest{}, ba.Requests[0].GetInner()) diff --git a/pkg/kv/kvclient/kvcoord/txn_lock_gatekeeper.go b/pkg/kv/kvclient/kvcoord/txn_lock_gatekeeper.go index 8bfa2f8f9868..9948611d3adb 100644 --- a/pkg/kv/kvclient/kvcoord/txn_lock_gatekeeper.go +++ b/pkg/kv/kvclient/kvcoord/txn_lock_gatekeeper.go @@ -30,7 +30,7 @@ type lockedSender interface { // WARNING: because the lock is released when calling this method and // re-acquired before it returned, callers cannot rely on a single mutual // exclusion zone mainted across the call. - SendLocked(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) + SendLocked(context.Context, *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) } // txnLockGatekeeper is a lockedSender that sits at the bottom of the @@ -54,7 +54,7 @@ type txnLockGatekeeper struct { // SendLocked implements the lockedSender interface. func (gs *txnLockGatekeeper) SendLocked( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // If so configured, protect against concurrent use of the txn. 
Concurrent // requests don't work generally because of races between clients sending diff --git a/pkg/kv/kvclient/kvcoord/txn_test.go b/pkg/kv/kvclient/kvcoord/txn_test.go index c250b994b856..994c45abda60 100644 --- a/pkg/kv/kvclient/kvcoord/txn_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_test.go @@ -205,7 +205,7 @@ func TestPriorityRatchetOnAbortOrPush(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) s := createTestDBWithKnobs(t, &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { // Reject transaction heartbeats, which can make the test flaky when they // detect an aborted transaction before the Get operation does. See #68584 // for an explanation. @@ -634,7 +634,7 @@ func TestTxnCommitTimestampAdvancedByRefresh(t *testing.T) { var refreshTS hlc.Timestamp errKey := roachpb.Key("inject_err") s := createTestDBWithKnobs(t, &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if g, ok := ba.GetArg(roachpb.Get); ok && g.(*roachpb.GetRequest).Key.Equal(errKey) { if injected { return nil diff --git a/pkg/kv/kvclient/kvstreamer/streamer.go b/pkg/kv/kvclient/kvstreamer/streamer.go index d27bc1e74201..febcd9c73c31 100644 --- a/pkg/kv/kvclient/kvstreamer/streamer.go +++ b/pkg/kv/kvclient/kvstreamer/streamer.go @@ -1141,7 +1141,7 @@ func (w *workerCoordinator) performRequestAsync( }, func(ctx context.Context) { defer w.asyncRequestCleanup(false /* budgetMuAlreadyLocked */) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header.WaitPolicy = w.lockWaitPolicy ba.Header.TargetBytes = targetBytes ba.Header.AllowEmpty = !headOfLine diff --git a/pkg/kv/kvclient/rangefeed/rangefeedcache/cache_test.go b/pkg/kv/kvclient/rangefeed/rangefeedcache/cache_test.go index ffdcb6186efa..d74b642a3fe7 100644 --- a/pkg/kv/kvclient/rangefeed/rangefeedcache/cache_test.go +++ b/pkg/kv/kvclient/rangefeed/rangefeedcache/cache_test.go @@ -66,7 +66,7 @@ func TestCache(t *testing.T) { readRowsAt := func(t *testing.T, ts hlc.Timestamp) []roachpb.KeyValue { txn := kvDB.NewTxn(ctx, "test") require.NoError(t, txn.SetFixedTimestamp(ctx, ts)) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&roachpb.ScanRequest{ RequestHeader: roachpb.RequestHeader{ Key: scratch, diff --git a/pkg/kv/kvprober/kvprober_integration_test.go b/pkg/kv/kvprober/kvprober_integration_test.go index 9eadccd9ca1f..7b56f26cabba 100644 --- a/pkg/kv/kvprober/kvprober_integration_test.go +++ b/pkg/kv/kvprober/kvprober_integration_test.go @@ -87,7 +87,7 @@ func TestProberDoesReadsAndWrites(t *testing.T) { t.Run("a single range is unavailable for all KV ops", func(t *testing.T) { s, _, p, cleanup := initTestProber(t, base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(i context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(i context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, ru := range ba.Requests { key := ru.GetInner().Header().Key if bytes.HasPrefix(key, keys.TimeseriesPrefix) { @@ -129,7 +129,7 @@ func TestProberDoesReadsAndWrites(t *testing.T) { s, _, p, cleanup := initTestProber(t, base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(i context.Context, ba roachpb.BatchRequest) 
*roachpb.Error { + TestingRequestFilter: func(i context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if !dbIsAvailable.Get() { for _, ru := range ba.Requests { if ru.GetGet() != nil { @@ -174,7 +174,7 @@ func TestProberDoesReadsAndWrites(t *testing.T) { s, _, p, cleanup := initTestProber(t, base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(i context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(i context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if !dbIsAvailable.Get() { for _, ru := range ba.Requests { if ru.GetPut() != nil { diff --git a/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go b/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go index d5af51a304d8..6012e5305db8 100644 --- a/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go +++ b/pkg/kv/kvserver/batcheval/cmd_add_sstable_test.go @@ -1632,7 +1632,7 @@ func TestAddSSTableIntentResolution(t *testing.T) { pointKV("b", 1, "2"), pointKV("c", 1, "3"), }) - ba := roachpb.BatchRequest{ + ba := &roachpb.BatchRequest{ Header: roachpb.Header{UserPriority: roachpb.MaxUserPriority}, } ba.Add(&roachpb.AddSSTableRequest{ @@ -1676,7 +1676,7 @@ func TestAddSSTableSSTTimestampToRequestTimestampRespectsTSCache(t *testing.T) { MVCCStats: storageutils.SSTStats(t, sst, 0), SSTTimestampToRequestTimestamp: hlc.Timestamp{WallTime: 1}, } - ba := roachpb.BatchRequest{ + ba := &roachpb.BatchRequest{ Header: roachpb.Header{Timestamp: txnTS.Prev()}, } ba.Add(sstReq) @@ -1691,7 +1691,7 @@ func TestAddSSTableSSTTimestampToRequestTimestampRespectsTSCache(t *testing.T) { // Adding the SST again and reading results in the new value, because the // tscache pushed the SST forward. - ba = roachpb.BatchRequest{ + ba = &roachpb.BatchRequest{ Header: roachpb.Header{Timestamp: txnTS.Prev()}, } ba.Add(sstReq) @@ -1736,7 +1736,7 @@ func TestAddSSTableSSTTimestampToRequestTimestampRespectsClosedTS(t *testing.T) MVCCStats: storageutils.SSTStats(t, sst, 0), SSTTimestampToRequestTimestamp: hlc.Timestamp{WallTime: 1}, } - ba := roachpb.BatchRequest{ + ba := &roachpb.BatchRequest{ Header: roachpb.Header{Timestamp: reqTS}, } ba.Add(sstReq) diff --git a/pkg/kv/kvserver/batcheval/cmd_is_span_empty_test.go b/pkg/kv/kvserver/batcheval/cmd_is_span_empty_test.go index 322b652de411..e06c84764a8b 100644 --- a/pkg/kv/kvserver/batcheval/cmd_is_span_empty_test.go +++ b/pkg/kv/kvserver/batcheval/cmd_is_span_empty_test.go @@ -33,7 +33,7 @@ func TestIsSpanEmpty(t *testing.T) { ServerArgs: base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, request roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, request *roachpb.BatchRequest) *roachpb.Error { if _, exists := request.GetArg(roachpb.IsSpanEmpty); exists { atomic.AddInt64(&sentIsSpanEmptyRequests, 1) } diff --git a/pkg/kv/kvserver/client_lease_test.go b/pkg/kv/kvserver/client_lease_test.go index 625ba285d397..8b07c933c606 100644 --- a/pkg/kv/kvserver/client_lease_test.go +++ b/pkg/kv/kvserver/client_lease_test.go @@ -631,7 +631,7 @@ func TestStoreLeaseTransferTimestampCacheRead(t *testing.T) { // Read the key at readTS. // NB: don't use SendWrapped because we want access to br.Timestamp. 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Timestamp = readTS ba.Add(getArgs(key)) br, pErr := tc.Servers[0].DistSender().Send(ctx, ba) @@ -649,7 +649,7 @@ func TestStoreLeaseTransferTimestampCacheRead(t *testing.T) { // Attempt to write under the read on the new leaseholder. The batch // should get forwarded to a timestamp after the read. // NB: don't use SendWrapped because we want access to br.Timestamp. - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Timestamp = readTS ba.Add(incrementArgs(key, 1)) br, pErr = tc.Servers[0].DistSender().Send(ctx, ba) @@ -1336,7 +1336,7 @@ func TestAcquireLeaseTimeout(t *testing.T) { // return the context error. var blockRangeID int32 - maybeBlockLeaseRequest := func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + maybeBlockLeaseRequest := func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if ba.IsSingleRequestLeaseRequest() && int32(ba.RangeID) == atomic.LoadInt32(&blockRangeID) { t.Logf("blocked lease request for r%d", ba.RangeID) <-ctx.Done() diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index fd1429f6a614..8b8dc45010bd 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -268,7 +268,7 @@ func mergeWithData(t *testing.T, retries int64) { manualClock := hlc.NewHybridManualClock() var store *kvserver.Store // Maybe inject some retryable errors when the merge transaction commits. - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, req := range ba.Requests { if et := req.GetEndTxn(); et != nil && et.InternalCommitTrigger.GetMergeTrigger() != nil { if atomic.AddInt64(&retries, -1) >= 0 { @@ -443,7 +443,7 @@ func mergeCheckingTimestampCaches( // leader-leaseholder state. blockHBAndGCs chan struct{} } - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { filterMu.Lock() mergeCommitFilterCopy := filterMu.mergeCommitFilter blockHBAndGCsCopy := filterMu.blockHBAndGCs @@ -561,7 +561,7 @@ func mergeCheckingTimestampCaches( } // Simulate a read on the RHS from a node with a newer clock. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Timestamp = readTS ba.RangeID = rhsDesc.RangeID ba.Add(getArgs(rhsKey)) @@ -580,7 +580,7 @@ func mergeCheckingTimestampCaches( // the timestamp cache to record the abort. pushee := roachpb.MakeTransaction("pushee", rhsKey, roachpb.MinUserPriority, readTS, 0, 0) pusher := roachpb.MakeTransaction("pusher", rhsKey, roachpb.MaxUserPriority, readTS, 0, 0) - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Timestamp = readTS.Next() ba.RangeID = rhsDesc.RangeID ba.Add(pushTxnArgs(&pusher, &pushee, roachpb.PUSH_ABORT)) @@ -843,7 +843,7 @@ func mergeCheckingTimestampCaches( // After the merge, attempt to write under the read. The batch should get // forwarded to a timestamp after the read. - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Timestamp = readTS ba.RangeID = lhsDesc.RangeID ba.Add(incrementArgs(rhsKey, 1)) @@ -862,7 +862,7 @@ func mergeCheckingTimestampCaches( // application or a Raft snapshot. Either way though, the transaction should // not be allowed to create its record. 
hb, hbH := heartbeatArgs(&pushee, tc.Servers[0].Clock().Now()) - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Header = hbH ba.RangeID = lhsDesc.RangeID ba.Add(hb) @@ -924,11 +924,11 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) { var readTS hlc.Timestamp rhsKey := scratchKey("c") var tc *testcluster.TestCluster - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if ba.IsSingleSubsumeRequest() { // Before we execute a Subsume request, execute a read on the same store // at a much higher timestamp. - gba := roachpb.BatchRequest{} + gba := &roachpb.BatchRequest{} gba.RangeID = ba.RangeID gba.Timestamp = ba.Timestamp.Add(42 /* wallTime */, 0 /* logical */) gba.Add(getArgs(rhsKey)) @@ -1017,7 +1017,7 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) { // Attempt to write at the same time as the read. The write's timestamp // should be forwarded to after the read. - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Timestamp = readTS ba.RangeID = lhsRangeDesc.RangeID ba.Add(incrementArgs(rhsKey, 1)) @@ -1065,7 +1065,7 @@ func TestStoreRangeMergeTxnFailure(t *testing.T) { // Install a store filter that maybe injects retryable errors into a merge // transaction before ultimately aborting the merge. var retriesBeforeFailure int64 - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, req := range ba.Requests { if et := req.GetEndTxn(); et != nil && et.InternalCommitTrigger.GetMergeTrigger() != nil { if atomic.AddInt64(&retriesBeforeFailure, -1) >= 0 { @@ -1161,7 +1161,7 @@ func TestStoreRangeMergeTxnRefresh(t *testing.T) { var sawMergeRefresh int32 testingResponseFilter := func( - ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, + ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse, ) *roachpb.Error { switch v := ba.Requests[0].GetInner().(type) { case *roachpb.ConditionalPutRequest: @@ -1668,7 +1668,7 @@ func TestStoreRangeMergeSplitRace_SplitWins(t *testing.T) { var mergePreSplit atomic.Value var splitCommit atomic.Value var mergeEndTxnTimestamp atomic.Value - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, req := range ba.Requests { if get := req.GetGet(); get != nil && get.KeyLocking != lock.None { if v := lhsDescKey.Load(); v != nil && v.(roachpb.Key).Equal(get.Key) { @@ -1764,7 +1764,7 @@ func TestStoreRangeMergeRHSLeaseExpiration(t *testing.T) { // Install a hook to control when the merge transaction commits. 
mergeEndTxnReceived := make(chan *roachpb.Transaction, 10) // headroom in case the merge transaction retries finishMerge := make(chan struct{}) - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, r := range ba.Requests { if et := r.GetEndTxn(); et != nil && et.InternalCommitTrigger.GetMergeTrigger() != nil { mergeEndTxnReceived <- ba.Txn @@ -1779,7 +1779,7 @@ func TestStoreRangeMergeRHSLeaseExpiration(t *testing.T) { const reqConcurrency = 10 var rhsSentinel roachpb.Key reqWaitingOnMerge := make(chan struct{}, reqConcurrency) - testingConcurrencyRetryFilter := func(_ context.Context, ba roachpb.BatchRequest, pErr *roachpb.Error) { + testingConcurrencyRetryFilter := func(_ context.Context, ba *roachpb.BatchRequest, pErr *roachpb.Error) { if _, ok := pErr.GetDetail().(*roachpb.MergeInProgressError); ok { for _, r := range ba.Requests { req := r.GetInner() @@ -1960,7 +1960,7 @@ func TestStoreRangeMergeRHSLeaseTransfers(t *testing.T) { var once sync.Once subsumeReceived := make(chan struct{}) finishSubsume := make(chan struct{}) - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if ba.IsSingleSubsumeRequest() { once.Do(func() { subsumeReceived <- struct{}{} @@ -2057,7 +2057,7 @@ func TestStoreRangeMergeLHSLeaseTransfersAfterFreezeTime(t *testing.T) { var once sync.Once subsumeReceived := make(chan struct{}) finishSubsume := make(chan struct{}) - testingResponseFilter := func(_ context.Context, ba roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { + testingResponseFilter := func(_ context.Context, ba *roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { if ba.IsSingleSubsumeRequest() { once.Do(func() { subsumeReceived <- struct{}{} @@ -2133,7 +2133,7 @@ func TestStoreRangeMergeLHSLeaseTransfersAfterFreezeTime(t *testing.T) { // Attempt to write below the closed timestamp, to the subsumed keyspace. // The write's timestamp should be forwarded to after the closed timestamp. // If it is not, we have violated the closed timestamp's promise! - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Timestamp = lhsClosedTS.Prev() ba.RangeID = lhsDesc.RangeID ba.Add(incrementArgs(rhsDesc.StartKey.AsRawKey().Next(), 1)) @@ -2160,7 +2160,7 @@ func TestStoreRangeMergeCheckConsistencyAfterSubsumption(t *testing.T) { // Install a hook to control when the merge transaction aborts. 
mergeEndTxnReceived := make(chan *roachpb.Transaction, 10) // headroom in case the merge transaction retries abortMergeTxn := make(chan struct{}) - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, r := range ba.Requests { if et := r.GetEndTxn(); et != nil && et.InternalCommitTrigger.GetMergeTrigger() != nil { mergeEndTxnReceived <- ba.Txn @@ -2246,7 +2246,7 @@ func TestStoreRangeMergeConcurrentRequests(t *testing.T) { var store *kvserver.Store manualClock := hlc.NewHybridManualClock() testingResponseFilter := func( - ctx context.Context, ba roachpb.BatchRequest, _ *roachpb.BatchResponse, + ctx context.Context, ba *roachpb.BatchRequest, _ *roachpb.BatchResponse, ) *roachpb.Error { cput := ba.Requests[0].GetConditionalPut() if cput != nil && !cput.Value.IsPresent() && bytes.HasSuffix(cput.Key, keys.LocalRangeDescriptorSuffix) && rand.Int()%4 == 0 { @@ -3076,7 +3076,7 @@ func TestStoreRangeMergeDeadFollowerDuringTxn(t *testing.T) { ctx := context.Background() var tc *testcluster.TestCluster - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if ba.IsSingleSubsumeRequest() { tc.StopServer(2) // This is safe to call multiple times, it will only stop once } @@ -3420,7 +3420,7 @@ func testMergeWatcher(t *testing.T, injectFailures bool) { // Maybe inject some retryable errors when the merge transaction commits. lhsExpectedKey := scratchRangeDescriptorKey() - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, req := range ba.Requests { if et := req.GetEndTxn(); et != nil && et.InternalCommitTrigger.GetMergeTrigger() != nil { if atomic.AddInt64(&mergeTxnRetries, -1) >= 0 { @@ -3567,7 +3567,7 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { } } - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, req := range ba.Requests { // We can detect PushTxn requests generated by the watcher goroutine // because they use the minimum transaction priority. 
Note that we @@ -3593,7 +3593,7 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { var sawMeta2Req int64 meta2CKey := keys.RangeMetaKey(cKey).AsRawKey() testingResponseFilter := func( - ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, + ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse, ) *roachpb.Error { for i, req := range ba.Requests { if g := req.GetGet(); g != nil && g.Key.Equal(meta2CKey) && br.Responses[i].GetGet().Value == nil { @@ -5000,7 +5000,7 @@ func setupClusterWithSubsumedRange( MaxOffset: testMaxOffset, TestingRequestFilter: filter.SuspendMergeTrigger, TestingConcurrencyRetryFilter: func( - ctx context.Context, ba roachpb.BatchRequest, pErr *roachpb.Error, + ctx context.Context, ba *roachpb.BatchRequest, pErr *roachpb.Error, ) { if _, ok := pErr.GetDetail().(*roachpb.MergeInProgressError); ok { atomic.AddInt32(&blockedRequestCount, 1) diff --git a/pkg/kv/kvserver/client_metrics_test.go b/pkg/kv/kvserver/client_metrics_test.go index 772d7a4eaf95..a8e4c2a442e4 100644 --- a/pkg/kv/kvserver/client_metrics_test.go +++ b/pkg/kv/kvserver/client_metrics_test.go @@ -204,7 +204,7 @@ func TestStoreResolveMetrics(t *testing.T) { const resolveAbortCount = int64(800) const resolvePoisonCount = int64(2400) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} { repl := store.LookupReplica(keys.MustAddr(span.Key)) var err error diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go index 4bd72e4511a3..0ab00292a17f 100644 --- a/pkg/kv/kvserver/client_raft_test.go +++ b/pkg/kv/kvserver/client_raft_test.go @@ -2248,7 +2248,7 @@ func TestQuotaPool(t *testing.T) { // to be the same as what we started with. keyToWrite := key.Next() value := bytes.Repeat([]byte("v"), (3*quota)/4) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(putArgs(keyToWrite, value)) if err := ba.SetActiveTimestamp(tc.Servers[0].Clock()); err != nil { t.Fatal(err) @@ -2269,7 +2269,7 @@ func TestQuotaPool(t *testing.T) { }) go func() { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(putArgs(keyToWrite, value)) if err := ba.SetActiveTimestamp(tc.Servers[0].Clock()); err != nil { ch <- roachpb.NewError(err) @@ -2359,7 +2359,7 @@ func TestWedgedReplicaDetection(t *testing.T) { // Send a request to the leader replica. followerRepl is locked so it will // not respond. 
value := []byte("value") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(putArgs(key, value)) if err := ba.SetActiveTimestamp(tc.Servers[0].Clock()); err != nil { t.Fatal(err) @@ -2669,7 +2669,7 @@ func TestReplicaRemovalCampaign(t *testing.T) { replica2 := store0.LookupReplica(roachpb.RKey(key2)) rg2 := func(s *kvserver.Store) kv.Sender { - return kv.Wrap(s, func(ba roachpb.BatchRequest) roachpb.BatchRequest { + return kv.Wrap(s, func(ba *roachpb.BatchRequest) *roachpb.BatchRequest { if ba.RangeID == 0 { ba.RangeID = replica2.RangeID } @@ -4353,7 +4353,7 @@ func TestStoreRangeWaitForApplication(t *testing.T) { var filterRangeIDAtomic int64 ctx := context.Background() - testingRequestFilter := func(_ context.Context, ba roachpb.BatchRequest) (retErr *roachpb.Error) { + testingRequestFilter := func(_ context.Context, ba *roachpb.BatchRequest) (retErr *roachpb.Error) { if rangeID := roachpb.RangeID(atomic.LoadInt64(&filterRangeIDAtomic)); rangeID != ba.RangeID { return nil } @@ -5449,14 +5449,14 @@ func TestReplicaRemovalClosesProposalQuota(t *testing.T) { ServerArgs: base.TestServerArgs{ Knobs: base.TestingKnobs{Store: &kvserver.StoreTestingKnobs{ DisableReplicaGCQueue: true, - TestingRequestFilter: kvserverbase.ReplicaRequestFilter(func(_ context.Context, r roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, r *roachpb.BatchRequest) *roachpb.Error { if r.RangeID == roachpb.RangeID(atomic.LoadInt64(&rangeID)) { if _, isPut := r.GetArg(roachpb.Put); isPut { atomic.AddInt64(&putRequestCount, 1) } } return nil - }), + }, }}, RaftConfig: base.RaftConfig{ // Set the proposal quota to a tiny amount so that each write will diff --git a/pkg/kv/kvserver/client_replica_backpressure_test.go b/pkg/kv/kvserver/client_replica_backpressure_test.go index bdd83322498e..d2780aae98cd 100644 --- a/pkg/kv/kvserver/client_replica_backpressure_test.go +++ b/pkg/kv/kvserver/client_replica_backpressure_test.go @@ -71,7 +71,7 @@ func TestBackpressureNotAppliedWhenReducingRangeSize(t *testing.T) { ServerArgs: base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if ba.Header.Txn != nil && ba.Header.Txn.Name == "split" && !allowSplits.Load().(bool) { rangesBlocked.Store(ba.Header.RangeID, true) defer rangesBlocked.Delete(ba.Header.RangeID) diff --git a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go index 2f6ce49a27c0..96d36335ff83 100644 --- a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go +++ b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go @@ -888,7 +888,7 @@ func (cbt *circuitBreakerTest) SendCtxTS( rec := finishAndGet() cbt.t.Logf("%s", rec) }() - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} repl := cbt.repls[idx] ba.RangeID = repl.Desc().RangeID ba.Timestamp = ts @@ -929,7 +929,7 @@ func (cbt *circuitBreakerTest) sendViaDistSender( ds *kvcoord.DistSender, req roachpb.Request, ) error { cbt.t.Helper() - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(req) ctx, cancel := context.WithTimeout(context.Background(), testutils.DefaultSucceedsSoonDuration) defer cancel() diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index bb37e16c564b..1468695cf0d9 100644 --- 
a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -836,7 +836,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { // flakiness. For now, we just re-order the operations and assert that we // receive an uncertainty error even though its absence would not be a true // stale read. - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Add(putArgs(keyA, []byte("val"))) br, pErr := tc.Servers[0].DistSender().Send(ctx, ba) require.Nil(t, pErr) @@ -1069,7 +1069,7 @@ func TestNonTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { type nonTxnGetKey struct{} nonTxnOrigTsC := make(chan hlc.Timestamp, 1) nonTxnBlockerC := make(chan struct{}) - requestFilter := func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + requestFilter := func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if ctx.Value(nonTxnGetKey{}) != nil { // Give the test the server-assigned timestamp. require.NotNil(t, ba.TimestampFromServerClock) @@ -1084,7 +1084,7 @@ func TestNonTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { return nil } var uncertaintyErrs int32 - concurrencyRetryFilter := func(ctx context.Context, _ roachpb.BatchRequest, pErr *roachpb.Error) { + concurrencyRetryFilter := func(ctx context.Context, _ *roachpb.BatchRequest, pErr *roachpb.Error) { if ctx.Value(nonTxnGetKey{}) != nil { if _, ok := pErr.GetDetail().(*roachpb.ReadWithinUncertaintyIntervalError); ok { atomic.AddInt32(&uncertaintyErrs, 1) @@ -1146,7 +1146,7 @@ func TestNonTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { nonTxnRespC := make(chan resp, 1) _ = tc.Stopper().RunAsyncTask(ctx, "non-txn get", func(ctx context.Context) { ctx = context.WithValue(ctx, nonTxnGetKey{}, "foo") - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.RangeID = desc.RangeID ba.Add(getArgs(key)) br, pErr := tc.GetFirstStoreFromServer(t, 1).Send(ctx, ba) @@ -1181,7 +1181,7 @@ func TestNonTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { // possible that we could avoid flakiness. For now, we just re-order the // operations and assert that we observe an uncertainty error even though its // absence would not be a true stale read. - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Add(putArgs(key, []byte("val"))) br, pErr := tc.Servers[0].DistSender().Send(ctx, ba) require.Nil(t, pErr) @@ -2565,7 +2565,7 @@ func TestRangeInfoAfterSplit(t *testing.T) { } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - ba := roachpb.BatchRequest{ + ba := &roachpb.BatchRequest{ Header: roachpb.Header{ RangeID: tc.rangeID, ClientRangeInfo: roachpb.ClientRangeInfo{ @@ -2903,7 +2903,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { // Read the key at readTS. // NB: don't use SendWrapped because we want access to br.Timestamp. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Timestamp = readTS ba.Add(getArgs(keyA)) br, pErr := tc.Servers[0].DistSender().Send(ctx, ba) @@ -2968,7 +2968,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { // snapshot had an empty timestamp cache and would simply let us write // under the previous read. // NB: don't use SendWrapped because we want access to br.Timestamp. 
- ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Timestamp = readTS ba.Add(incrementArgs(keyA, 1)) br, pErr = tc.Servers[0].DistSender().Send(ctx, ba) @@ -3007,7 +3007,7 @@ func TestLeaseTransferRejectedIfTargetNeedsSnapshot(t *testing.T) { ServerArgs: base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if rejectAfterRevoke && ba.IsSingleTransferLeaseRequest() { transferLeaseReqBlockOnce.Do(func() { close(transferLeaseReqBlockedC) @@ -3751,7 +3751,7 @@ func TestAdminRelocateRangeSafety(t *testing.T) { var useSeenAdd atomic.Value useSeenAdd.Store(false) seenAdd := make(chan struct{}, 1) - responseFilter := func(ctx context.Context, ba roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { + responseFilter := func(ctx context.Context, ba *roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { if ba.IsSingleRequest() { changeReplicas, ok := ba.Requests[0].GetInner().(*roachpb.AdminChangeReplicasRequest) if ok && changeReplicas.Changes()[0].ChangeType == roachpb.ADD_VOTER && useSeenAdd.Load().(bool) { @@ -3882,7 +3882,7 @@ func TestChangeReplicasLeaveAtomicRacesWithMerge(t *testing.T) { var rangeToBlockRangeDescriptorRead atomic.Value rangeToBlockRangeDescriptorRead.Store(roachpb.RangeID(0)) blockRangeDescriptorReadChan := make(chan struct{}, 1) - blockOnChangeReplicasRead := kvserverbase.ReplicaRequestFilter(func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + blockOnChangeReplicasRead := func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if req, isGet := ba.GetArg(roachpb.Get); !isGet || ba.RangeID != rangeToBlockRangeDescriptorRead.Load().(roachpb.RangeID) || !ba.IsSingleRequest() || @@ -3897,7 +3897,7 @@ func TestChangeReplicasLeaveAtomicRacesWithMerge(t *testing.T) { default: } return nil - }) + } tc := testcluster.StartTestCluster(t, numNodes, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ Knobs: base.TestingKnobs{ @@ -4593,7 +4593,7 @@ func TestDiscoverIntentAcrossLeaseTransferAwayAndBack(t *testing.T) { // Detect when txn4 discovers txn3's intent and begins to push. 
var txn4ID atomic.Value txn4PushingC := make(chan struct{}, 1) - requestFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + requestFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if !ba.IsSinglePushTxnRequest() { return nil } diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index 3c4a981895f3..07afc6ee8db1 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -1226,7 +1226,7 @@ func TestStoreRangeSplitBackpressureWrites(t *testing.T) { zoneConfig.RangeMaxBytes = proto.Int64(maxBytes) testingRequestFilter := - func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { for _, req := range ba.Requests { if cPut, ok := req.GetInner().(*roachpb.ConditionalPutRequest); ok { if cPut.Key.Equal(keys.RangeDescriptorKey(splitKey)) { @@ -1793,7 +1793,7 @@ func TestStoreSplitOnRemovedReplica(t *testing.T) { inFilter := make(chan struct{}, 1) beginBlockingSplit := make(chan struct{}) finishBlockingSplit := make(chan struct{}) - filter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + filter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { // Block replica 1's attempt to perform the AdminSplit. We detect the // split's range descriptor update and block until the rest of the test // is ready. We then return a ConditionFailedError, simulating a @@ -2483,7 +2483,7 @@ func TestDistributedTxnCleanup(t *testing.T) { // This simulates txn deadlock or a max priority txn aborting a // normal or min priority txn. if force { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Timestamp = store.Clock().Now() ba.RangeID = lhs.RangeID ba.Add(&roachpb.PushTxnRequest{ @@ -3022,7 +3022,7 @@ func TestStoreSplitRangeLookupRace(t *testing.T) { blockedRangeLookups := int32(0) rangeLookupIsBlocked := make(chan struct{}, 1) unblockRangeLookups := make(chan struct{}) - respFilter := func(ctx context.Context, ba roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { + respFilter := func(ctx context.Context, ba *roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { select { case <-blockRangeLookups: if kv.TestingIsRangeLookup(ba) && @@ -3555,7 +3555,7 @@ func TestStoreRangeSplitAndMergeWithGlobalReads(t *testing.T) { // necessary, see maybeCommitWaitBeforeCommitTrigger. var clock atomic.Value var splitsWithSyntheticTS, mergesWithSyntheticTS int64 - respFilter := func(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { + respFilter := func(ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { if req, ok := ba.GetArg(roachpb.EndTxn); ok { endTxn := req.(*roachpb.EndTxnRequest) if br.Txn.Status == roachpb.COMMITTED && br.Txn.WriteTimestamp.Synthetic { diff --git a/pkg/kv/kvserver/closed_timestamp_test.go b/pkg/kv/kvserver/closed_timestamp_test.go index 8b15fa14a611..1acfd480404b 100644 --- a/pkg/kv/kvserver/closed_timestamp_test.go +++ b/pkg/kv/kvserver/closed_timestamp_test.go @@ -102,7 +102,7 @@ func TestClosedTimestampCanServe(t *testing.T) { // We just served a follower read. As a sanity check, make sure that we can't write at // that same timestamp. 
{ - var baWrite roachpb.BatchRequest + baWrite := &roachpb.BatchRequest{} r := &roachpb.DeleteRequest{} r.Key = desc.StartKey.AsRawKey() txn := roachpb.MakeTransaction("testwrite", r.Key, roachpb.NormalUserPriority, ts, 100, int32(tc.Server(0).SQLInstanceID())) @@ -546,7 +546,7 @@ func TestClosedTimestampCantServeForNonTransactionalReadRequest(t *testing.T) { }) // Create a "nontransactional" read-only batch. - var baQueryTxn roachpb.BatchRequest + baQueryTxn := &roachpb.BatchRequest{} baQueryTxn.Header.RangeID = desc.RangeID r := &roachpb.QueryTxnRequest{} r.Key = desc.StartKey.AsRawKey() @@ -812,7 +812,7 @@ SET CLUSTER SETTING kv.closed_timestamp.follower_reads_enabled = true; require.NoError(t, err) writeTime := rhsLeaseStart.Prev() require.True(t, mergedLeaseholder.GetCurrentClosedTimestamp(ctx).Less(writeTime)) - var baWrite roachpb.BatchRequest + baWrite := &roachpb.BatchRequest{} baWrite.Header.RangeID = leftDesc.RangeID baWrite.Header.Timestamp = writeTime put := &roachpb.PutRequest{} @@ -935,7 +935,7 @@ func (filter *mergeFilter) resetBlocker() (*mergeBlocker, bool) { // Communication with actors interested in blocked merges is done through // BlockNextMerge(). func (filter *mergeFilter) SuspendMergeTrigger( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) *roachpb.Error { for _, req := range ba.Requests { if et := req.GetEndTxn(); et != nil && et.Commit && @@ -1064,7 +1064,7 @@ func getTargetStore( } func verifyNotLeaseHolderErrors( - t *testing.T, ba roachpb.BatchRequest, repls []*kvserver.Replica, expectedNLEs int, + t *testing.T, ba *roachpb.BatchRequest, repls []*kvserver.Replica, expectedNLEs int, ) { t.Helper() notLeaseholderErrs, err := countNotLeaseHolderErrors(ba, repls) @@ -1076,7 +1076,7 @@ func verifyNotLeaseHolderErrors( } } -func countNotLeaseHolderErrors(ba roachpb.BatchRequest, repls []*kvserver.Replica) (int64, error) { +func countNotLeaseHolderErrors(ba *roachpb.BatchRequest, repls []*kvserver.Replica) (int64, error) { g, ctx := errgroup.WithContext(context.Background()) var notLeaseholderErrs int64 for i := range repls { @@ -1303,7 +1303,7 @@ func expectRows(expectedRows int) respFunc { func verifyCanReadFromAllRepls( ctx context.Context, t *testing.T, - baRead roachpb.BatchRequest, + baRead *roachpb.BatchRequest, repls []*kvserver.Replica, f respFunc, ) error { @@ -1331,10 +1331,10 @@ func verifyCanReadFromAllRepls( return g.Wait() } -func makeTxnReadBatchForDesc(desc roachpb.RangeDescriptor, ts hlc.Timestamp) roachpb.BatchRequest { +func makeTxnReadBatchForDesc(desc roachpb.RangeDescriptor, ts hlc.Timestamp) *roachpb.BatchRequest { txn := roachpb.MakeTransaction("txn", nil, 0, ts, 0, 0) - var baRead roachpb.BatchRequest + baRead := &roachpb.BatchRequest{} baRead.Header.RangeID = desc.RangeID baRead.Header.Timestamp = ts baRead.Header.Txn = &txn diff --git a/pkg/kv/kvserver/consistency_queue_test.go b/pkg/kv/kvserver/consistency_queue_test.go index 54a1669cfa80..71ac7ecaad83 100644 --- a/pkg/kv/kvserver/consistency_queue_test.go +++ b/pkg/kv/kvserver/consistency_queue_test.go @@ -169,7 +169,7 @@ func TestCheckConsistencyReplay(t *testing.T) { // Arrange to trigger a retry when a ComputeChecksum request arrives. 
testKnobs.TestingResponseFilter = func( - ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, + ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse, ) *roachpb.Error { state.Lock() defer state.Unlock() diff --git a/pkg/kv/kvserver/helpers_test.go b/pkg/kv/kvserver/helpers_test.go index 1d4032700165..7d3956ed3bae 100644 --- a/pkg/kv/kvserver/helpers_test.go +++ b/pkg/kv/kvserver/helpers_test.go @@ -409,7 +409,7 @@ func MakeSSTable( } func ProposeAddSSTable(ctx context.Context, key, val string, ts hlc.Timestamp, store *Store) error { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = store.LookupReplica(roachpb.RKey(key)).RangeID var addReq roachpb.AddSSTableRequest diff --git a/pkg/kv/kvserver/intent_resolver_integration_test.go b/pkg/kv/kvserver/intent_resolver_integration_test.go index a21ed2a23c1c..9470fe925808 100644 --- a/pkg/kv/kvserver/intent_resolver_integration_test.go +++ b/pkg/kv/kvserver/intent_resolver_integration_test.go @@ -48,7 +48,7 @@ func beginTransaction( return txn } - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn} put := putArgs(key, []byte("value")) ba.Add(&put) @@ -240,7 +240,7 @@ func TestReliableIntentCleanup(t *testing.T) { return readyC } - requestFilter := func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + requestFilter := func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { // If we receive a heartbeat from a txn in abortHeartbeats, // close the aborted channel and return an error response. if _, ok := ba.GetArg(roachpb.HeartbeatTxn); ok && ba.Txn != nil { @@ -271,7 +271,7 @@ func TestReliableIntentCleanup(t *testing.T) { return nil } - responseFilter := func(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { + responseFilter := func(ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { // If we receive a Put request from a txn in blockPuts, signal // the caller that the Put is ready to block by passing it an // unblock channel, and wait for it to close. 
diff --git a/pkg/kv/kvserver/intentresolver/intent_resolver_test.go b/pkg/kv/kvserver/intentresolver/intent_resolver_test.go index 7a7a7b9ab869..d32808ca0675 100644 --- a/pkg/kv/kvserver/intentresolver/intent_resolver_test.go +++ b/pkg/kv/kvserver/intentresolver/intent_resolver_test.go @@ -365,7 +365,7 @@ func TestCleanupMultipleIntentsAsync(t *testing.T) { pushed []string resolved []string } - pushOrResolveFunc := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + pushOrResolveFunc := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { switch ba.Requests[0].GetInner().Method() { case roachpb.PushTxn: for _, ru := range ba.Requests { @@ -469,7 +469,7 @@ func TestCleanupTxnIntentsAsyncWithPartialRollback(t *testing.T) { txn.IgnoredSeqNums = []enginepb.IgnoredSeqNumRange{{Start: 1, End: 1}} var gotResolveIntent, gotResolveIntentRange int32 - check := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + check := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { for _, r := range ba.Requests { if ri, ok := r.GetInner().(*roachpb.ResolveIntentRequest); ok { atomic.StoreInt32(&gotResolveIntent, 1) @@ -627,7 +627,7 @@ func TestCleanupMultipleTxnIntentsAsync(t *testing.T) { resolved []string gced []string } - resolveOrGCFunc := func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + resolveOrGCFunc := func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if len(ba.Requests) != 1 { return nil, roachpb.NewErrorf("unexpected") } @@ -783,13 +783,13 @@ func makeTxnIntents(t *testing.T, clock *hlc.Clock, numIntents int) []roachpb.In // the IntentResolver tries to send. They are used in conjunction with the below // function to create an IntentResolver with a slice of sendFuncs. // A library of useful sendFuncs are defined below. 
-type sendFunc func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) +type sendFunc func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) func newIntentResolverWithSendFuncs( c Config, sf *sendFuncs, stopper *stop.Stopper, ) *IntentResolver { txnSenderFactory := kv.NonTransactionalFactoryFunc( - func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { sf.mu.Lock() defer sf.mu.Unlock() f := sf.popLocked() @@ -805,7 +805,7 @@ func newIntentResolverWithSendFuncs( func pushTxnSendFuncs(sf *sendFuncs, N int) sendFunc { toPush := int64(N) var f sendFunc - f = func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + f = func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if remaining := atomic.LoadInt64(&toPush); len(ba.Requests) > int(remaining) { sf.t.Errorf("expected at most %d PushTxnRequests in batch, got %d", remaining, len(ba.Requests)) @@ -820,7 +820,7 @@ func pushTxnSendFuncs(sf *sendFuncs, N int) sendFunc { } func pushTxnSendFunc(t *testing.T, numPushes int) sendFunc { - return func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + return func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if len(ba.Requests) != numPushes { t.Errorf("expected %d PushTxnRequests in batch, got %d", numPushes, len(ba.Requests)) @@ -856,7 +856,7 @@ func resolveIntentsSendFuncsEx( toResolve := int64(numIntents) reqsSeen := int64(0) var f sendFunc - f = func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + f = func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if remaining := atomic.LoadInt64(&toResolve); len(ba.Requests) > int(remaining) { sf.t.Errorf("expected at most %d ResolveIntentRequests in batch, got %d", remaining, len(ba.Requests)) @@ -875,7 +875,7 @@ func resolveIntentsSendFuncsEx( } func resolveIntentsSendFuncEx(t *testing.T, checkTxnStatusOpt checkTxnStatusOpt) sendFunc { - return func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + return func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { return respForResolveIntentBatch(t, ba, checkTxnStatusOpt), nil } } @@ -892,12 +892,12 @@ func resolveIntentsSendFunc(t *testing.T) sendFunc { return resolveIntentsSendFuncEx(t, dontCheckTxnStatus) } -func failSendFunc(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { +func failSendFunc(*roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { return nil, roachpb.NewError(fmt.Errorf("boom")) } func gcSendFunc(t *testing.T) sendFunc { - return func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + return func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { resp := &roachpb.BatchResponse{} for _, r := range ba.Requests { if _, ok := r.GetInner().(*roachpb.GCRequest); !ok { @@ -909,7 +909,7 @@ func gcSendFunc(t *testing.T) sendFunc { } } -func respForPushTxnBatch(t *testing.T, ba roachpb.BatchRequest) *roachpb.BatchResponse { +func respForPushTxnBatch(t *testing.T, ba *roachpb.BatchRequest) *roachpb.BatchResponse { resp := &roachpb.BatchResponse{} for _, r := range ba.Requests { var txn enginepb.TxnMeta @@ -929,7 +929,7 @@ func respForPushTxnBatch(t *testing.T, ba roachpb.BatchRequest) *roachpb.BatchRe } func respForResolveIntentBatch( - t *testing.T, ba roachpb.BatchRequest, checkTxnStatusOpt checkTxnStatusOpt, + t 
*testing.T, ba *roachpb.BatchRequest, checkTxnStatusOpt checkTxnStatusOpt, ) *roachpb.BatchResponse { resp := &roachpb.BatchResponse{} var status roachpb.TransactionStatus diff --git a/pkg/kv/kvserver/kvserverbase/base.go b/pkg/kv/kvserver/kvserverbase/base.go index 73cd847d4976..4e7a5caebbf7 100644 --- a/pkg/kv/kvserver/kvserverbase/base.go +++ b/pkg/kv/kvserver/kvserverbase/base.go @@ -88,11 +88,11 @@ func (f *FilterArgs) InRaftCmd() bool { // ReplicaRequestFilter can be used in testing to influence the error returned // from a request before it is evaluated. Return nil to continue with regular // processing or non-nil to terminate processing with the returned error. -type ReplicaRequestFilter func(context.Context, roachpb.BatchRequest) *roachpb.Error +type ReplicaRequestFilter func(context.Context, *roachpb.BatchRequest) *roachpb.Error // ReplicaConcurrencyRetryFilter can be used to examine a concurrency retry // error before it is handled and its batch is re-evaluated. -type ReplicaConcurrencyRetryFilter func(context.Context, roachpb.BatchRequest, *roachpb.Error) +type ReplicaConcurrencyRetryFilter func(context.Context, *roachpb.BatchRequest, *roachpb.Error) // ReplicaCommandFilter may be used in tests through the StoreTestingKnobs to // intercept the handling of commands and artificially generate errors. Return @@ -111,7 +111,7 @@ type ReplicaApplyFilter func(args ApplyFilterArgs) (int, *roachpb.Error) // ReplicaResponseFilter is used in unittests to modify the outbound // response returned to a waiting client after a replica command has // been processed. This filter is invoked only by the command proposer. -type ReplicaResponseFilter func(context.Context, roachpb.BatchRequest, *roachpb.BatchResponse) *roachpb.Error +type ReplicaResponseFilter func(context.Context, *roachpb.BatchRequest, *roachpb.BatchResponse) *roachpb.Error // ReplicaRangefeedFilter is used in unit tests to modify the request, inject // responses, or return errors from rangefeeds. diff --git a/pkg/kv/kvserver/merge_queue.go b/pkg/kv/kvserver/merge_queue.go index 2aba1502bbfe..1e3b4e51696c 100644 --- a/pkg/kv/kvserver/merge_queue.go +++ b/pkg/kv/kvserver/merge_queue.go @@ -186,7 +186,7 @@ func (mq *mergeQueue) requestRangeStats( ctx context.Context, key roachpb.Key, ) (desc *roachpb.RangeDescriptor, stats enginepb.MVCCStats, qps float64, qpsOK bool, err error) { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&roachpb.RangeStatsRequest{ RequestHeader: roachpb.RequestHeader{Key: key}, }) diff --git a/pkg/kv/kvserver/mvcc_gc_queue.go b/pkg/kv/kvserver/mvcc_gc_queue.go index b02fbda23ad4..7bbe1e5c867a 100644 --- a/pkg/kv/kvserver/mvcc_gc_queue.go +++ b/pkg/kv/kvserver/mvcc_gc_queue.go @@ -525,8 +525,7 @@ func (r *replicaGCer) send(ctx context.Context, req roachpb.GCRequest) error { n := atomic.AddInt32(&r.count, 1) log.Eventf(ctx, "sending batch %d (%d keys)", n, len(req.Keys)) - var ba roachpb.BatchRequest - + ba := &roachpb.BatchRequest{} // Technically not needed since we're talking directly to the Replica. 
ba.RangeID = r.repl.Desc().RangeID ba.Timestamp = r.repl.Clock().Now() @@ -567,7 +566,7 @@ func (r *replicaGCer) send(ctx context.Context, req roachpb.GCRequest) error { } ba.Replica.StoreID = r.storeID var err error - admissionHandle, err = r.admissionController.AdmitKVWork(ctx, roachpb.SystemTenantID, &ba) + admissionHandle, err = r.admissionController.AdmitKVWork(ctx, roachpb.SystemTenantID, ba) if err != nil { return err } diff --git a/pkg/kv/kvserver/mvcc_gc_queue_test.go b/pkg/kv/kvserver/mvcc_gc_queue_test.go index c71268a9b9db..bf610f33a41e 100644 --- a/pkg/kv/kvserver/mvcc_gc_queue_test.go +++ b/pkg/kv/kvserver/mvcc_gc_queue_test.go @@ -1380,7 +1380,7 @@ func TestMVCCGCQueueChunkRequests(t *testing.T) { fmtStr := fmt.Sprintf("%%0%dd", keySize) // First write 2 * gcKeyVersionChunkBytes different keys (each with two versions). - ba1, ba2 := roachpb.BatchRequest{}, roachpb.BatchRequest{} + ba1, ba2 := &roachpb.BatchRequest{}, &roachpb.BatchRequest{} for i := 0; i < 2*keyCount; i++ { // Create keys which are key := roachpb.Key(fmt.Sprintf(fmtStr, i)) @@ -1403,7 +1403,7 @@ func TestMVCCGCQueueChunkRequests(t *testing.T) { key1 := roachpb.Key(fmt.Sprintf(fmtStr, 2*keyCount)) key2 := roachpb.Key(fmt.Sprintf(fmtStr, 2*keyCount+1)) for i := 0; i < 2*keyCount+1; i++ { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} // Only write keyCount+1 versions of key1. if i < keyCount+1 { pArgs1 := putArgs(key1, []byte(fmt.Sprintf("value%04d", i))) diff --git a/pkg/kv/kvserver/protectedts/ptcache/cache_test.go b/pkg/kv/kvserver/protectedts/ptcache/cache_test.go index a59eab7274b6..64ec9b9ab781 100644 --- a/pkg/kv/kvserver/protectedts/ptcache/cache_test.go +++ b/pkg/kv/kvserver/protectedts/ptcache/cache_test.go @@ -171,7 +171,7 @@ func TestRefresh(t *testing.T) { withCancel, cancel := context.WithCancel(ctx) defer cancel() done := make(chan struct{}) - st.setFilter(func(ba roachpb.BatchRequest) *roachpb.Error { + st.setFilter(func(ba *roachpb.BatchRequest) *roachpb.Error { if scanReq, ok := ba.GetArg(roachpb.Scan); ok { scan := scanReq.(*roachpb.ScanRequest) if scan.Span().Overlaps(metaTableSpan) { @@ -186,7 +186,7 @@ func TestRefresh(t *testing.T) { close(done) }) t.Run("error propagates while fetching metadata", func(t *testing.T) { - st.setFilter(func(ba roachpb.BatchRequest) *roachpb.Error { + st.setFilter(func(ba *roachpb.BatchRequest) *roachpb.Error { if scanReq, ok := ba.GetArg(roachpb.Scan); ok { scan := scanReq.(*roachpb.ScanRequest) if scan.Span().Overlaps(metaTableSpan) { @@ -200,7 +200,7 @@ func TestRefresh(t *testing.T) { }) t.Run("error propagates while fetching records", func(t *testing.T) { protect(t, s, p, s.Clock().Now(), metaTableSpan) - st.setFilter(func(ba roachpb.BatchRequest) *roachpb.Error { + st.setFilter(func(ba *roachpb.BatchRequest) *roachpb.Error { if scanReq, ok := ba.GetArg(roachpb.Scan); ok { scan := scanReq.(*roachpb.ScanRequest) if scan.Span().Overlaps(recordsTableSpan) { @@ -607,7 +607,7 @@ type scanTracker struct { mu syncutil.Mutex metaTableScans int recordsTableScans int - filterFunc func(ba roachpb.BatchRequest) *roachpb.Error + filterFunc func(ba *roachpb.BatchRequest) *roachpb.Error } func (st *scanTracker) resetCounters() { @@ -624,13 +624,13 @@ func (st *scanTracker) verifyCounters(t *testing.T, expMeta, expRecords int) { require.Equal(t, expRecords, st.recordsTableScans) } -func (st *scanTracker) setFilter(f func(roachpb.BatchRequest) *roachpb.Error) { +func (st *scanTracker) setFilter(f func(*roachpb.BatchRequest) *roachpb.Error) { st.mu.Lock() 
defer st.mu.Unlock() st.filterFunc = f } -func (st *scanTracker) requestFilter(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { +func (st *scanTracker) requestFilter(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { st.mu.Lock() defer st.mu.Unlock() if scanReq, ok := ba.GetArg(roachpb.Scan); ok { diff --git a/pkg/kv/kvserver/raft_log_queue_test.go b/pkg/kv/kvserver/raft_log_queue_test.go index 5b85299b9358..4cf10aaa981a 100644 --- a/pkg/kv/kvserver/raft_log_queue_test.go +++ b/pkg/kv/kvserver/raft_log_queue_test.go @@ -867,7 +867,7 @@ func TestTruncateLogRecompute(t *testing.T) { var v roachpb.Value v.SetBytes(bytes.Repeat([]byte("x"), RaftLogQueueStaleSize*5)) put := roachpb.NewPut(key, v) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(put) ba.RangeID = repl.RangeID diff --git a/pkg/kv/kvserver/replica_batch_updates.go b/pkg/kv/kvserver/replica_batch_updates.go index 52ea4f37d9fb..b6dda09e0efe 100644 --- a/pkg/kv/kvserver/replica_batch_updates.go +++ b/pkg/kv/kvserver/replica_batch_updates.go @@ -64,6 +64,7 @@ func maybeStripInFlightWrites(ba *roachpb.BatchRequest) (*roachpb.BatchRequest, et = &etAlloc.et et.InFlightWrites = nil et.LockSpans = et.LockSpans[:len(et.LockSpans):len(et.LockSpans)] // immutable + ba = ba.ShallowCopy() ba.Requests = append([]roachpb.RequestUnion(nil), ba.Requests...) ba.Requests[len(ba.Requests)-1].Value = &etAlloc.union diff --git a/pkg/kv/kvserver/replica_batch_updates_test.go b/pkg/kv/kvserver/replica_batch_updates_test.go index f314b2b502a5..8b92e3ba977e 100644 --- a/pkg/kv/kvserver/replica_batch_updates_test.go +++ b/pkg/kv/kvserver/replica_batch_updates_test.go @@ -92,10 +92,10 @@ func TestMaybeStripInFlightWrites(t *testing.T) { }, } for _, c := range testCases { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(c.reqs...) t.Run(fmt.Sprint(ba), func(t *testing.T) { - resBa, err := maybeStripInFlightWrites(&ba) + resBa, err := maybeStripInFlightWrites(ba) if c.expErr == "" { if err != nil { t.Errorf("expected no error, got %v", err) diff --git a/pkg/kv/kvserver/replica_circuit_breaker.go b/pkg/kv/kvserver/replica_circuit_breaker.go index cbb8ef005300..8a5ccd897b81 100644 --- a/pkg/kv/kvserver/replica_circuit_breaker.go +++ b/pkg/kv/kvserver/replica_circuit_breaker.go @@ -35,7 +35,7 @@ import ( type replicaInCircuitBreaker interface { Clock() *hlc.Clock Desc() *roachpb.RangeDescriptor - Send(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) + Send(context.Context, *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) slowReplicationThreshold(ba *roachpb.BatchRequest) (time.Duration, bool) replicaUnavailableError(err error) error poisonInflightLatches(err error) @@ -213,13 +213,13 @@ func sendProbe(ctx context.Context, r replicaInCircuitBreaker) error { if !desc.IsInitialized() { return nil } - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Timestamp = r.Clock().Now() ba.RangeID = r.Desc().RangeID probeReq := &roachpb.ProbeRequest{} probeReq.Key = desc.StartKey.AsRawKey() ba.Add(probeReq) - thresh, ok := r.slowReplicationThreshold(&ba) + thresh, ok := r.slowReplicationThreshold(ba) if !ok { // Breakers are disabled now. 
return nil diff --git a/pkg/kv/kvserver/replica_circuit_breaker_test.go b/pkg/kv/kvserver/replica_circuit_breaker_test.go index da00c0c6249f..5125be4c95bc 100644 --- a/pkg/kv/kvserver/replica_circuit_breaker_test.go +++ b/pkg/kv/kvserver/replica_circuit_breaker_test.go @@ -36,8 +36,6 @@ func TestReplicaUnavailableError(t *testing.T) { repls.AddReplica(roachpb.ReplicaDescriptor{NodeID: 1, StoreID: 10, ReplicaID: 100}) repls.AddReplica(roachpb.ReplicaDescriptor{NodeID: 2, StoreID: 20, ReplicaID: 200}) desc := roachpb.NewRangeDescriptor(10, roachpb.RKey("a"), roachpb.RKey("z"), repls) - var ba roachpb.BatchRequest - ba.Add(&roachpb.RequestLeaseRequest{}) lm := liveness.IsLiveMap{ 1: liveness.IsLiveMapEntry{IsLive: true}, } diff --git a/pkg/kv/kvserver/replica_closedts_internal_test.go b/pkg/kv/kvserver/replica_closedts_internal_test.go index a7e1e9b5bd73..925e0d47cf07 100644 --- a/pkg/kv/kvserver/replica_closedts_internal_test.go +++ b/pkg/kv/kvserver/replica_closedts_internal_test.go @@ -973,7 +973,7 @@ func TestServerSideBoundedStalenessNegotiation(t *testing.T) { tc.repl.mu.Unlock() // Construct and issue the request. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = tc.rangeID ba.BoundedStaleness = &roachpb.BoundedStalenessHeader{ MinTimestampBound: test.minTSBound, @@ -1077,7 +1077,8 @@ func TestServerSideBoundedStalenessNegotiationWithResumeSpan(t *testing.T) { // get: [g] // get: [h] // - makeReq := func(maxKeys int) (ba roachpb.BatchRequest) { + makeReq := func(maxKeys int) *roachpb.BatchRequest { + ba := &roachpb.BatchRequest{} ba.BoundedStaleness = &roachpb.BoundedStalenessHeader{ MinTimestampBound: makeTS(5), } diff --git a/pkg/kv/kvserver/replica_closedts_test.go b/pkg/kv/kvserver/replica_closedts_test.go index 7713692f00f8..fc7c482612b4 100644 --- a/pkg/kv/kvserver/replica_closedts_test.go +++ b/pkg/kv/kvserver/replica_closedts_test.go @@ -112,7 +112,7 @@ func TestBumpSideTransportClosed(t *testing.T) { exp: false, knobs: func() (*kvserver.StoreTestingKnobs, chan chan struct{}) { mergeC := make(chan chan struct{}) - testingResponseFilter := func(ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { + testingResponseFilter := func(ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { if ba.IsSingleSubsumeRequest() { unblockC := make(chan struct{}) mergeC <- unblockC @@ -817,7 +817,7 @@ func TestNonBlockingReadsWithServerSideBoundedStalenessNegotiation(t *testing.T) // to block on an intent. Send to a specific store instead of through // a DistSender so that we'll hear an error (NotLeaseholderError) if // the request would otherwise be redirected to the leaseholder. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = rangeID ba.BoundedStaleness = &roachpb.BoundedStalenessHeader{ MinTimestampBound: minTSBound, diff --git a/pkg/kv/kvserver/replica_follower_read.go b/pkg/kv/kvserver/replica_follower_read.go index 582595d429dc..6818365a8e20 100644 --- a/pkg/kv/kvserver/replica_follower_read.go +++ b/pkg/kv/kvserver/replica_follower_read.go @@ -36,7 +36,7 @@ var FollowerReadsEnabled = settings.RegisterBoolSetting( // BatchCanBeEvaluatedOnFollower determines if a batch consists exclusively of // requests that can be evaluated on a follower replica, given a sufficiently // advanced closed timestamp. -func BatchCanBeEvaluatedOnFollower(ba roachpb.BatchRequest) bool { +func BatchCanBeEvaluatedOnFollower(ba *roachpb.BatchRequest) bool { // Explanation of conditions: // 1. 
the batch cannot have or intend to receive a timestamp set from a // server-side clock. If a follower with a lagging clock sets its timestamp @@ -63,7 +63,7 @@ func BatchCanBeEvaluatedOnFollower(ba roachpb.BatchRequest) bool { // must be transactional and composed exclusively of this kind of request to be // accepted as a follower read. func (r *Replica) canServeFollowerReadRLocked(ctx context.Context, ba *roachpb.BatchRequest) bool { - eligible := BatchCanBeEvaluatedOnFollower(*ba) && FollowerReadsEnabled.Get(&r.store.cfg.Settings.SV) + eligible := BatchCanBeEvaluatedOnFollower(ba) && FollowerReadsEnabled.Get(&r.store.cfg.Settings.SV) if !eligible { // We couldn't do anything with the error, propagate it. return false diff --git a/pkg/kv/kvserver/replica_learner_test.go b/pkg/kv/kvserver/replica_learner_test.go index 06933e69aeec..7d9d2ee365a5 100644 --- a/pkg/kv/kvserver/replica_learner_test.go +++ b/pkg/kv/kvserver/replica_learner_test.go @@ -750,7 +750,7 @@ func TestSplitRetriesOnFailedExitOfJointConfig(t *testing.T) { var rangeIDAtomic int64 var rejectedCount int const maxRejects = 3 - reqFilter := func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + reqFilter := func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { rangeID := roachpb.RangeID(atomic.LoadInt64(&rangeIDAtomic)) if ba.RangeID == rangeID && ba.IsSingleTransferLeaseRequest() && rejectedCount < maxRejects { rejectedCount++ @@ -1215,7 +1215,7 @@ func TestLearnerAndVoterOutgoingFollowerRead(t *testing.T) { check := func() { ts := tc.Server(0).Clock().Now() txn := roachpb.MakeTransaction("txn", nil, 0, ts, 0, int32(tc.Server(0).SQLInstanceID())) - req := roachpb.BatchRequest{Header: roachpb.Header{ + req := &roachpb.BatchRequest{Header: roachpb.Header{ RangeID: scratchDesc.RangeID, Timestamp: ts, Txn: &txn, diff --git a/pkg/kv/kvserver/replica_probe_test.go b/pkg/kv/kvserver/replica_probe_test.go index 8c7917a42e8a..f1681471ed80 100644 --- a/pkg/kv/kvserver/replica_probe_test.go +++ b/pkg/kv/kvserver/replica_probe_test.go @@ -125,7 +125,7 @@ func TestReplicaProbeRequest(t *testing.T) { for _, srv := range tc.Servers { repl, _, err := srv.Stores().GetReplicaForRangeID(ctx, desc.RangeID) require.NoError(t, err) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(probeReq) ba.Timestamp = srv.Clock().Now() _, pErr := repl.Send(ctx, ba) @@ -143,7 +143,7 @@ func TestReplicaProbeRequest(t *testing.T) { for _, srv := range tc.Servers { repl, _, err := srv.Stores().GetReplicaForRangeID(ctx, desc.RangeID) require.NoError(t, err) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Timestamp = srv.Clock().Now() ba.Add(probeReq) _, pErr := repl.Send(ctx, ba) diff --git a/pkg/kv/kvserver/replica_proposal_buf_test.go b/pkg/kv/kvserver/replica_proposal_buf_test.go index 2dbb2d11e6d9..9572ca752c46 100644 --- a/pkg/kv/kvserver/replica_proposal_buf_test.go +++ b/pkg/kv/kvserver/replica_proposal_buf_test.go @@ -240,25 +240,25 @@ type proposalCreator struct { } func (pc proposalCreator) newPutProposal(ts hlc.Timestamp) *ProposalData { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&roachpb.PutRequest{}) ba.Timestamp = ts return pc.newProposal(ba) } func (pc proposalCreator) newLeaseRequestProposal(lease roachpb.Lease) *ProposalData { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&roachpb.RequestLeaseRequest{Lease: lease, PrevLease: pc.lease.Lease}) return pc.newProposal(ba) } func (pc proposalCreator) newLeaseTransferProposal(lease 
roachpb.Lease) *ProposalData { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&roachpb.TransferLeaseRequest{Lease: lease, PrevLease: pc.lease.Lease}) return pc.newProposal(ba) } -func (pc proposalCreator) newProposal(ba roachpb.BatchRequest) *ProposalData { +func (pc proposalCreator) newProposal(ba *roachpb.BatchRequest) *ProposalData { var lease *roachpb.Lease var isLeaseRequest bool switch v := ba.Requests[0].GetInner().(type) { @@ -277,7 +277,7 @@ func (pc proposalCreator) newProposal(ba roachpb.BatchRequest) *ProposalData { State: &kvserverpb.ReplicaState{Lease: lease}, }, }, - Request: &ba, + Request: ba, leaseStatus: pc.lease, } p.encodedCommand = pc.encodeProposal(p) diff --git a/pkg/kv/kvserver/replica_range_lease.go b/pkg/kv/kvserver/replica_range_lease.go index 2c698261c12e..758cffab8893 100644 --- a/pkg/kv/kvserver/replica_range_lease.go +++ b/pkg/kv/kvserver/replica_range_lease.go @@ -528,7 +528,7 @@ func (p *pendingLeaseRequest) requestLease( // solution to the below issue: // // https://github.com/cockroachdb/cockroach/issues/37906 - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Timestamp = p.repl.store.Clock().Now() ba.RangeID = p.repl.RangeID // NB: diff --git a/pkg/kv/kvserver/replica_rangefeed_test.go b/pkg/kv/kvserver/replica_rangefeed_test.go index 55f0a2e7a06d..6a35b5b1b66f 100644 --- a/pkg/kv/kvserver/replica_rangefeed_test.go +++ b/pkg/kv/kvserver/replica_rangefeed_test.go @@ -445,7 +445,7 @@ func TestReplicaRangefeed(t *testing.T) { } gcReq.Key = startKey gcReq.EndKey = firstStore.LookupReplica(startKey).Desc().EndKey.AsRawKey() - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = rangeID ba.Add(gcReq) if _, pErr := firstStore.Send(ctx, ba); pErr != nil { @@ -1194,7 +1194,7 @@ func TestRangefeedCheckpointsRecoverFromLeaseExpiration(t *testing.T) { WallClock: manualClock, }, Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { // Once reject is set, the test wants full control over the requests // evaluating on the scratch range. On that range, we'll reject // everything that's not triggered by the test because we want to only @@ -1369,7 +1369,7 @@ func TestNewRangefeedForceLeaseRetry(t *testing.T) { WallClock: manualClock, }, Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { // Once reject is set, the test wants full control over the requests // evaluating on the scratch range. 
On that range, we'll reject diff --git a/pkg/kv/kvserver/replica_rankings_test.go b/pkg/kv/kvserver/replica_rankings_test.go index 1365b440d4b1..285be5208789 100644 --- a/pkg/kv/kvserver/replica_rankings_test.go +++ b/pkg/kv/kvserver/replica_rankings_test.go @@ -122,8 +122,8 @@ func TestAddSSTQPSStat(t *testing.T) { RequestHeader: roachpb.RequestHeader{Key: start}, } - addSSTBA := roachpb.BatchRequest{} - nonSSTBA := roachpb.BatchRequest{} + addSSTBA := &roachpb.BatchRequest{} + nonSSTBA := &roachpb.BatchRequest{} addSSTBA.Add(sstReq) nonSSTBA.Add(get) @@ -134,7 +134,7 @@ func TestAddSSTQPSStat(t *testing.T) { testCases := []struct { addsstRequestFactor int expectedQPS float64 - ba roachpb.BatchRequest + ba *roachpb.BatchRequest }{ {0, 1, addSSTBA}, {100, 1, nonSSTBA}, @@ -184,9 +184,9 @@ func TestAddSSTQPSStat(t *testing.T) { } // genVariableRead returns a batch request containing, start-end sequential key reads. -func genVariableRead(ctx context.Context, start, end roachpb.Key) roachpb.BatchRequest { +func genVariableRead(ctx context.Context, start, end roachpb.Key) *roachpb.BatchRequest { scan := roachpb.NewScan(start, end, false) - readBa := roachpb.BatchRequest{} + readBa := &roachpb.BatchRequest{} readBa.Add(scan) return readBa } @@ -355,7 +355,7 @@ func TestReadLoadMetricAccounting(t *testing.T) { MVCCStats: storageutils.SSTStats(t, sst, 0), } - addSSTBA := roachpb.BatchRequest{} + addSSTBA := &roachpb.BatchRequest{} addSSTBA.Add(sstReq) // Send an AddSSTRequest once to create the key range. @@ -366,18 +366,18 @@ func TestReadLoadMetricAccounting(t *testing.T) { RequestHeader: roachpb.RequestHeader{Key: start}, } - getReadBA := roachpb.BatchRequest{} + getReadBA := &roachpb.BatchRequest{} getReadBA.Add(get) scan := &roachpb.ScanRequest{ RequestHeader: roachpb.RequestHeader{Key: start, EndKey: end}, } - scanReadBA := roachpb.BatchRequest{} + scanReadBA := &roachpb.BatchRequest{} scanReadBA.Add(scan) testCases := []struct { - ba roachpb.BatchRequest + ba *roachpb.BatchRequest expectedRQPS float64 expectedWPS float64 expectedRPS float64 diff --git a/pkg/kv/kvserver/replica_send.go b/pkg/kv/kvserver/replica_send.go index dbe2252f4e87..edca0ad25be4 100644 --- a/pkg/kv/kvserver/replica_send.go +++ b/pkg/kv/kvserver/replica_send.go @@ -109,7 +109,7 @@ var optimisticEvalLimitedScans = settings.RegisterBoolSetting( // to commit the command, then signaling proposer and // applying the command) func (r *Replica) Send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { br, writeBytes, pErr := r.SendWithWriteBytes(ctx, ba) writeBytes.Release() @@ -119,7 +119,7 @@ func (r *Replica) Send( // SendWithWriteBytes is the implementation of Send with an additional // *StoreWriteBytes return value. func (r *Replica) SendWithWriteBytes( - ctx context.Context, req roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *StoreWriteBytes, *roachpb.Error) { if r.store.cfg.Settings.CPUProfileType() == cluster.CPUProfileWithLabels { defer pprof.SetGoroutineLabels(ctx) @@ -129,7 +129,6 @@ func (r *Replica) SendWithWriteBytes( } // Add the range log tag. ctx = r.AnnotateCtx(ctx) - ba := &req // Record summary throughput information about the batch request for // accounting. 
@@ -160,7 +159,7 @@ func (r *Replica) SendWithWriteBytes( } if filter := r.store.cfg.TestingKnobs.TestingRequestFilter; filter != nil { - if pErr := filter(ctx, *ba); pErr != nil { + if pErr := filter(ctx, ba); pErr != nil { return nil, nil, pErr } } @@ -192,7 +191,7 @@ func (r *Replica) SendWithWriteBytes( log.Eventf(ctx, "replica.Send got error: %s", pErr) } else { if filter := r.store.cfg.TestingKnobs.TestingResponseFilter; filter != nil { - pErr = filter(ctx, *ba, br) + pErr = filter(ctx, ba, br) } } @@ -492,7 +491,7 @@ func (r *Replica) executeBatchWithConcurrencyRetries( // guard without having already released the guard's latches. g.AssertLatches() if filter := r.store.cfg.TestingKnobs.TestingConcurrencyRetryFilter; filter != nil { - filter(ctx, *ba, pErr) + filter(ctx, ba, pErr) } // Typically, retries are marked PessimisticEval. The one exception is a diff --git a/pkg/kv/kvserver/replica_sideload_test.go b/pkg/kv/kvserver/replica_sideload_test.go index 704c6511e9c8..32eae418d4eb 100644 --- a/pkg/kv/kvserver/replica_sideload_test.go +++ b/pkg/kv/kvserver/replica_sideload_test.go @@ -615,7 +615,7 @@ func TestRaftSSTableSideloadingProposal(t *testing.T) { } { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} get := getArgs(roachpb.Key(key)) ba.Add(&get) ba.Header.RangeID = tc.repl.RangeID @@ -712,7 +712,7 @@ func TestRaftSSTableSideloading(t *testing.T) { // Disable log truncation to make sure our proposal stays in the log. tc.store.SetRaftLogQueueActive(false) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = tc.repl.RangeID // Put a sideloaded proposal on the Range. diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index 457631b590de..6ea04cd100db 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -222,7 +222,7 @@ func (tc *testContext) StartWithStoreConfigAndVersion( } func (tc *testContext) Sender() kv.Sender { - return kv.Wrap(tc.repl, func(ba roachpb.BatchRequest) roachpb.BatchRequest { + return kv.Wrap(tc.repl, func(ba *roachpb.BatchRequest) *roachpb.BatchRequest { if ba.RangeID == 0 { ba.RangeID = 1 } @@ -430,7 +430,7 @@ func TestIsOnePhaseCommit(t *testing.T) { fmt.Sprintf("%d:isNonTxn:%t,canForwardTS:%t,isRestarted:%t,isWTO:%t,isTSOff:%t", i, c.isNonTxn, c.canForwardTS, c.isRestarted, c.isWTO, c.isTSOff), func(t *testing.T) { - ba := roachpb.BatchRequest{Requests: c.ru} + ba := &roachpb.BatchRequest{Requests: c.ru} if !c.isNonTxn { ba.Txn = newTransaction("txn", roachpb.Key("a"), 1, clock) if c.canForwardTS { @@ -455,9 +455,9 @@ func TestIsOnePhaseCommit(t *testing.T) { // Emulate what a server actually does and bump the write timestamp when // possible. This makes some batches with diverged read and write // timestamps pass isOnePhaseCommit(). 
- maybeBumpReadTimestampToWriteTimestamp(ctx, &ba, allSpansGuard()) + maybeBumpReadTimestampToWriteTimestamp(ctx, ba, allSpansGuard()) - if is1PC := isOnePhaseCommit(&ba); is1PC != c.exp1PC { + if is1PC := isOnePhaseCommit(ba); is1PC != c.exp1PC { t.Errorf("expected 1pc=%t; got %t", c.exp1PC, is1PC) } }) @@ -497,7 +497,7 @@ func TestReplicaContains(t *testing.T) { func sendLeaseRequest(r *Replica, l *roachpb.Lease) error { ctx := context.Background() - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Timestamp = r.store.Clock().Now() st := r.CurrentLeaseStatus(ctx) leaseReq := &roachpb.RequestLeaseRequest{ @@ -506,7 +506,7 @@ func sendLeaseRequest(r *Replica, l *roachpb.Lease) error { } ba.Add(leaseReq) _, tok := r.mu.proposalBuf.TrackEvaluatingRequest(ctx, hlc.MinTimestamp) - ch, _, _, _, pErr := r.evalAndPropose(ctx, &ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) + ch, _, _, _, pErr := r.evalAndPropose(ctx, ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) if pErr == nil { // Next if the command was committed, wait for the range to apply it. // TODO(bdarnell): refactor this to a more conventional error-handling pattern. @@ -863,7 +863,7 @@ func TestReplicaRangeMismatchRedirect(t *testing.T) { } gArgs := getArgs(roachpb.Key("b")) - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{ RangeID: 1, } @@ -1333,11 +1333,11 @@ func TestReplicaLeaseRejectUnknownRaftNodeID(t *testing.T) { }, } st := tc.repl.CurrentLeaseStatus(ctx) - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Timestamp = tc.repl.store.Clock().Now() ba.Add(&roachpb.RequestLeaseRequest{Lease: *lease}) _, tok := tc.repl.mu.proposalBuf.TrackEvaluatingRequest(ctx, hlc.MinTimestamp) - ch, _, _, _, pErr := tc.repl.evalAndPropose(ctx, &ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) + ch, _, _, _, pErr := tc.repl.evalAndPropose(ctx, ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) if pErr == nil { // Next if the command was committed, wait for the range to apply it. // TODO(bdarnell): refactor to a more conventional error-handling pattern. 
@@ -2201,7 +2201,7 @@ func TestReplicaLatching(t *testing.T) { defer close(blockingDone) // make sure teardown can happen sendWithHeader := func(header roachpb.Header, args roachpb.Request) *roachpb.Error { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Header = header ba.Add(args) @@ -2426,7 +2426,7 @@ func TestReplicaLatchingSelfOverlap(t *testing.T) { testutils.RunTrueAndFalse(t, "cmd1Read", func(t *testing.T, cmd1Read bool) { testutils.RunTrueAndFalse(t, "cmd2Read", func(t *testing.T, cmd2Read bool) { key := fmt.Sprintf("%v,%v", cmd1Read, cmd2Read) - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Add(readOrWriteArgs(roachpb.Key(key), cmd1Read)) ba.Add(readOrWriteArgs(roachpb.Key(key), cmd2Read)) @@ -2508,11 +2508,11 @@ func TestReplicaLatchingTimestampNonInterference(t *testing.T) { blockKey.Store(test.key) errCh := make(chan *roachpb.Error, 2) - baR := roachpb.BatchRequest{} + baR := &roachpb.BatchRequest{} baR.Timestamp = test.readerTS gArgs := getArgs(test.key) baR.Add(&gArgs) - baW := roachpb.BatchRequest{} + baW := &roachpb.BatchRequest{} baW.Timestamp = test.writerTS pArgs := putArgs(test.key, []byte("value")) baW.Add(&pArgs) @@ -2620,7 +2620,7 @@ func TestReplicaLatchingOptimisticEvaluationKeyLimit(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) testutils.RunTrueAndFalse(t, "point-reads", func(t *testing.T, pointReads bool) { - var baRead roachpb.BatchRequest + baRead := &roachpb.BatchRequest{} if pointReads { gArgs1, gArgs2 := getArgsString("a"), getArgsString("b") gArgs3, gArgs4 := getArgsString("c"), getArgsString("d") @@ -2700,7 +2700,7 @@ func TestReplicaLatchingOptimisticEvaluationKeyLimit(t *testing.T) { <-blockedCh // Write is now blocked while holding latches. blockWriter.Store(false) - baReadCopy := baRead + baReadCopy := baRead.ShallowCopy() baReadCopy.MaxSpanRequestKeys = test.limit go func() { _, pErr := tc.Sender().Send(context.Background(), baReadCopy) @@ -2748,7 +2748,7 @@ func TestReplicaLatchingOptimisticEvaluationSkipLocked(t *testing.T) { defer log.Scope(t).Close(t) testutils.RunTrueAndFalse(t, "point-reads", func(t *testing.T, pointReads bool) { testutils.RunTrueAndFalse(t, "locking-reads", func(t *testing.T, lockingReads bool) { - var baRead roachpb.BatchRequest + baRead := &roachpb.BatchRequest{} baRead.WaitPolicy = lock.WaitPolicy_SkipLocked if pointReads { gArgs1, gArgs2 := getArgsString("a"), getArgsString("b") @@ -2894,7 +2894,7 @@ func TestReplicaUseTSCache(t *testing.T) { // Perform a conflicting write. Should get bumped. 
pArgs := putArgs([]byte("a"), []byte("value")) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&pArgs) ba.Timestamp = startTS @@ -2936,7 +2936,7 @@ func TestReplicaTSCacheForwardsIntentTS(t *testing.T) { gArgs := getArgs(keyGet) drArgs := deleteRangeArgs(keyDeleteRange, keyDeleteRange.Next()) assignSeqNumsForReqs(txnNew, &gArgs, &drArgs) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header.Txn = txnNew ba.Add(&gArgs, &drArgs) if _, pErr := tc.Sender().Send(ctx, ba); pErr != nil { @@ -3183,7 +3183,7 @@ func TestReplicaNoTSCacheInconsistent(t *testing.T) { } pArgs := putArgs([]byte("a"), []byte("value")) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Timestamp: hlc.Timestamp{WallTime: 0, Logical: 1}} ba.Add(&pArgs) br, pErr := tc.Sender().Send(context.Background(), ba) @@ -3237,7 +3237,7 @@ func TestReplicaNoTSCacheUpdateOnFailure(t *testing.T) { } // Write the intent again -- should not have its timestamp upgraded! - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn} ba.Add(&pArgs) assignSeqNumsForReqs(txn, &pArgs) @@ -3278,7 +3278,7 @@ func TestReplicaNoTSCacheIncrementWithinTxn(t *testing.T) { } // Now try a write and verify timestamp isn't incremented. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn} pArgs := putArgs(key, []byte("value")) ba.Add(&pArgs) @@ -3306,7 +3306,7 @@ func TestReplicaNoTSCacheIncrementWithinTxn(t *testing.T) { expTS := ts expTS.Logical++ - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Header = roachpb.Header{Timestamp: ts} ba.Add(&pArgs) assignSeqNumsForReqs(txn, &pArgs) @@ -3414,7 +3414,7 @@ func TestReplicaTxnIdempotency(t *testing.T) { tc.Start(ctx, t, stopper) runWithTxn := func(txn *roachpb.Transaction, reqs ...roachpb.Request) error { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Header.Txn = txn ba.Add(reqs...) _, pErr := tc.Sender().Send(ctx, ba) @@ -4068,7 +4068,7 @@ func TestEndTxnDeadline_1PC(t *testing.T) { // Past deadline. et.Deadline = txn.WriteTimestamp.Prev() - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = etH ba.Add(&put, &et) assignSeqNumsForReqs(txn, &put, &et) @@ -4105,7 +4105,7 @@ func Test1PCTransactionWriteTimestamp(t *testing.T) { } // Now verify that the write triggers a retry. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = etH ba.Add(&put, &et) assignSeqNumsForReqs(txn, &put, &et) @@ -4526,7 +4526,7 @@ func TestEndTxnRollbackAbortedTransaction(t *testing.T) { } // Check that the intent has not yet been resolved. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} gArgs := getArgs(key) ba.Add(&gArgs) if err := ba.SetActiveTimestamp(tc.Clock()); err != nil { @@ -4593,7 +4593,7 @@ func TestRPCRetryProtectionInTxn(t *testing.T) { txn := newTransaction("test", key, 1, tc.Clock()) // Send a batch with put & end txn. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.CanForwardReadTimestamp = noPriorReads put := putArgs(key, []byte("value")) et, _ := endTxnArgs(txn, true) @@ -4645,7 +4645,7 @@ func TestErrorsDontCarryWriteTooOldFlag(t *testing.T) { // Write a value outside of the txn to cause a WriteTooOldError later. 
put := putArgs(keyA, []byte("val1")) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&put) _, pErr := tc.Sender().Send(ctx, ba) require.Nil(t, pErr) @@ -4690,7 +4690,7 @@ func TestBatchRetryCantCommitIntents(t *testing.T) { txn := newTransaction("test", key, 1, tc.Clock()) // Send a put for keyA. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} put := putArgs(key, []byte("value")) ba.Header = roachpb.Header{Txn: txn} ba.Add(&put) @@ -4704,7 +4704,7 @@ func TestBatchRetryCantCommitIntents(t *testing.T) { } // Send a put for keyB. - var ba2 roachpb.BatchRequest + ba2 := &roachpb.BatchRequest{} putB := putArgs(keyB, []byte("value")) putTxn := br.Txn.Clone() ba2.Header = roachpb.Header{Txn: putTxn} @@ -4860,7 +4860,7 @@ func setupResolutionTest( } { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = h ba.RangeID = newRepl.RangeID if err := ba.SetActiveTimestamp(newRepl.store.Clock()); err != nil { @@ -4912,7 +4912,7 @@ func TestEndTxnResolveOnlyLocalIntents(t *testing.T) { // Check if the intent in the other range has not yet been resolved. { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header.RangeID = newRepl.RangeID gArgs := getArgs(splitKey) ba.Add(&gArgs) @@ -5062,7 +5062,7 @@ func TestEndTxnDirectGC_1PC(t *testing.T) { et.LockSpans = []roachpb.Span{{Key: key}} assignSeqNumsForReqs(txn, &put, &et) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = etH ba.Add(&put, &et) br, err := tc.Sender().Send(ctx, ba) @@ -5138,7 +5138,7 @@ func TestReplicaTransactionRequires1PC(t *testing.T) { key := roachpb.Key(fmt.Sprintf("%d", i)) // Create the 1PC batch. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} txn := newTransaction("test", key, 1, tc.Clock()) put := putArgs(key, []byte("value")) et, etH := endTxnArgs(txn, true) @@ -5180,7 +5180,7 @@ func TestReplicaEndTxnWithRequire1PC(t *testing.T) { key := roachpb.Key("a") txn := newTransaction("test", key, 1, tc.Clock()) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn} put := putArgs(key, []byte("value")) ba.Add(&put) @@ -5191,7 +5191,7 @@ func TestReplicaEndTxnWithRequire1PC(t *testing.T) { et, etH := endTxnArgs(txn, true) et.Require1PC = true - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Header = etH ba.Add(&et) assignSeqNumsForReqs(txn, &et) @@ -6006,7 +6006,7 @@ func TestPushTxnSerializableRestart(t *testing.T) { // Try to end pushed transaction at restart timestamp, which is // earlier than its now-pushed timestamp. Should fail. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&put) ba.Add(&etArgs) ba.Header.Txn = pushee @@ -6140,7 +6140,7 @@ func TestQueryIntentRequest(t *testing.T) { txnCopy := *txn pArgs2 := putArgs(keyPrevent, []byte("value2")) assignSeqNumsForReqs(&txnCopy, &pArgs2) - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txnCopy} ba.Add(&pArgs2) br, pErr := tc.Sender().Send(context.Background(), ba) @@ -6942,7 +6942,7 @@ func TestBatchErrorWithIndex(t *testing.T) { defer stopper.Stop(ctx) tc.Start(ctx, t, stopper) - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} // This one succeeds. 
ba.Add(&roachpb.PutRequest{ RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("k")}, @@ -7041,7 +7041,7 @@ func TestQuotaPoolReleasedOnFailedProposal(t *testing.T) { t.Fatal(pErr) } - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} pArg := putArgs(roachpb.Key("a"), make([]byte, 1<<10)) ba.Add(&pArg) ctx = context.WithValue(ctx, magicKey{}, "foo") @@ -7437,7 +7437,7 @@ func TestReplicaCancelRaft(t *testing.T) { if cancelEarly { cancel() } - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = 1 ba.Add(&roachpb.GetRequest{ RequestHeader: roachpb.RequestHeader{Key: key}, @@ -7445,7 +7445,7 @@ func TestReplicaCancelRaft(t *testing.T) { if err := ba.SetActiveTimestamp(tc.Clock()); err != nil { t.Fatal(err) } - _, _, pErr := tc.repl.executeBatchWithConcurrencyRetries(ctx, &ba, (*Replica).executeWriteBatch) + _, _, pErr := tc.repl.executeBatchWithConcurrencyRetries(ctx, ba, (*Replica).executeWriteBatch) if cancelEarly { if !testutils.IsPError(pErr, context.Canceled.Error()) { t.Fatalf("expected canceled error; got %v", pErr) @@ -7495,13 +7495,13 @@ func TestReplicaAbandonProposal(t *testing.T) { } tc.repl.mu.Unlock() - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = 1 ba.Timestamp = tc.Clock().Now() ba.Add(&roachpb.PutRequest{ RequestHeader: roachpb.RequestHeader{Key: []byte("acdfg")}, }) - _, _, pErr := tc.repl.executeBatchWithConcurrencyRetries(ctx, &ba, (*Replica).executeWriteBatch) + _, _, pErr := tc.repl.executeBatchWithConcurrencyRetries(ctx, ba, (*Replica).executeWriteBatch) if pErr == nil { t.Fatal("expected failure, but found success") } @@ -7603,7 +7603,7 @@ func TestReplicaRetryRaftProposal(t *testing.T) { pArg := putArgs(roachpb.Key("a"), []byte("asd")) { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&pArg) ba.Timestamp = tc.Clock().Now() if _, pErr := tc.Sender().Send(ctx, ba); pErr != nil { @@ -7622,7 +7622,7 @@ func TestReplicaRetryRaftProposal(t *testing.T) { log.Infof(ctx, "test begins") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = 1 ba.Timestamp = tc.Clock().Now() const expInc = 123 @@ -7631,7 +7631,7 @@ func TestReplicaRetryRaftProposal(t *testing.T) { { _, _, pErr := tc.repl.executeBatchWithConcurrencyRetries( context.WithValue(ctx, magicKey{}, "foo"), - &ba, + ba, (*Replica).executeWriteBatch, ) if pErr != nil { @@ -7666,7 +7666,7 @@ func TestReplicaRetryRaftProposal(t *testing.T) { }) _, _, pErr := tc.repl.executeBatchWithConcurrencyRetries( context.WithValue(ctx, magicKey{}, "foo"), - &ba, + ba, (*Replica).executeWriteBatch, ) if pErr != nil { @@ -7707,7 +7707,7 @@ func TestReplicaCancelRaftCommandProgress(t *testing.T) { var chs []chan proposalResult const num = 10 for i := 0; i < num; i++ { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Timestamp = tc.Clock().Now() ba.Add(&roachpb.PutRequest{ RequestHeader: roachpb.RequestHeader{ @@ -7716,7 +7716,7 @@ func TestReplicaCancelRaftCommandProgress(t *testing.T) { }) st := repl.CurrentLeaseStatus(ctx) _, tok := repl.mu.proposalBuf.TrackEvaluatingRequest(ctx, hlc.MinTimestamp) - ch, _, id, _, err := repl.evalAndPropose(ctx, &ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) + ch, _, id, _, err := repl.evalAndPropose(ctx, ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) if err != nil { t.Fatal(err) } @@ -7778,7 +7778,7 @@ func TestReplicaBurstPendingCommandsAndRepropose(t *testing.T) { const num = 10 chs := make([]chan proposalResult, 0, num) for i := 0; i < num; i++ { 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Timestamp = tc.Clock().Now() ba.Add(&roachpb.PutRequest{ RequestHeader: roachpb.RequestHeader{ @@ -7787,7 +7787,7 @@ func TestReplicaBurstPendingCommandsAndRepropose(t *testing.T) { }) _, tok := tc.repl.mu.proposalBuf.TrackEvaluatingRequest(ctx, hlc.MinTimestamp) st := tc.repl.CurrentLeaseStatus(ctx) - ch, _, _, _, err := tc.repl.evalAndPropose(ctx, &ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) + ch, _, _, _, err := tc.repl.evalAndPropose(ctx, ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) if err != nil { t.Fatal(err) } @@ -7897,11 +7897,11 @@ func TestReplicaRefreshPendingCommandsTicks(t *testing.T) { for i := 0; i < 2*electionTicks; i++ { // Add another pending command on each iteration. id := fmt.Sprintf("%08d", i) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Timestamp = tc.Clock().Now() ba.Add(&roachpb.PutRequest{RequestHeader: roachpb.RequestHeader{Key: roachpb.Key(id)}}) st := r.CurrentLeaseStatus(ctx) - cmd, pErr := r.requestToProposal(ctx, kvserverbase.CmdIDKey(id), &ba, allSpansGuard(), &st, uncertainty.Interval{}) + cmd, pErr := r.requestToProposal(ctx, kvserverbase.CmdIDKey(id), ba, allSpansGuard(), &st, uncertainty.Interval{}) if pErr != nil { t.Fatal(pErr) } @@ -8018,14 +8018,14 @@ func TestReplicaRefreshMultiple(t *testing.T) { // since the reproposals we're concerned with don't result in // reevaluation it doesn't matter) inc := incrementArgs(key, 1) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(inc) ba.Timestamp = tc.Clock().Now() incCmdID = makeIDKey() atomic.StoreInt32(&filterActive, 1) st := repl.CurrentLeaseStatus(ctx) - proposal, pErr := repl.requestToProposal(ctx, incCmdID, &ba, allSpansGuard(), &st, uncertainty.Interval{}) + proposal, pErr := repl.requestToProposal(ctx, incCmdID, ba, allSpansGuard(), &st, uncertainty.Interval{}) if pErr != nil { t.Fatal(pErr) } @@ -8176,7 +8176,7 @@ func TestReplicaReproposalWithNewLeaseIndexError(t *testing.T) { // Perform a write that will first hit an illegal lease index error and // will then hit the injected error when we attempt to repropose it. 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} iArg := incrementArgs(key, 10) ba.Add(iArg) if _, pErr := tc.Sender().Send(magicCtx, ba); pErr == nil { @@ -8216,7 +8216,7 @@ func TestFailureToProcessCommandClearsLocalResult(t *testing.T) { key := roachpb.Key("a") txn := newTransaction("test", key, 1, tc.Clock()) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn} put := putArgs(key, []byte("value")) assignSeqNumsForReqs(txn, &put) @@ -8246,7 +8246,7 @@ func TestFailureToProcessCommandClearsLocalResult(t *testing.T) { opCtx, getRecAndFinish := tracing.ContextWithRecordingSpan(ctx, tr, "test-recording") defer getRecAndFinish() - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} et, etH := endTxnArgs(txn, true /* commit */) et.LockSpans = []roachpb.Span{{Key: key}} assignSeqNumsForReqs(txn, &et) @@ -8291,7 +8291,7 @@ func TestMVCCStatsGCCommutesWithWrites(t *testing.T) { require.NoError(t, err) write := func() hlc.Timestamp { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} put := putArgs(key, []byte("0")) ba.Add(&put) resp, pErr := store.TestSender().Send(ctx, ba) @@ -8581,7 +8581,7 @@ func TestGCThresholdRacesWithRead(t *testing.T) { require.NoError(t, err) testutils.SucceedsSoon(t, func() error { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = desc.RangeID ba.ReadConsistency = roachpb.INCONSISTENT ba.Add(&roachpb.QueryResolvedTimestampRequest{ @@ -8687,7 +8687,7 @@ func BenchmarkMVCCGCWithForegroundTraffic(b *testing.B) { send := func(args roachpb.Request) *roachpb.BatchResponse { var header roachpb.Header header.Timestamp = tc.Clock().Now() - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Header = header ba.Add(args) resp, err := tc.Sender().Send(ctx, ba) @@ -8834,7 +8834,7 @@ func TestReplicaTimestampCacheBumpNotLost(t *testing.T) { txn := newTransaction("test", key, 1, tc.Clock()) minNewTS := func() hlc.Timestamp { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} scan := scanArgs(key, tc.repl.Desc().EndKey.AsRawKey()) ba.Add(scan) @@ -8848,7 +8848,7 @@ func TestReplicaTimestampCacheBumpNotLost(t *testing.T) { return resp.Timestamp }() - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Txn = txn txnPut := putArgs(key, []byte("timestamp should be bumped")) ba.Add(&txnPut) @@ -8892,7 +8892,7 @@ func TestReplicaEvaluationNotTxnMutation(t *testing.T) { txn := newTransaction("test", key, 1, tc.Clock()) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Txn = txn ba.Timestamp = txn.WriteTimestamp txnPut := putArgs(key, []byte("foo")) @@ -8906,7 +8906,7 @@ func TestReplicaEvaluationNotTxnMutation(t *testing.T) { assignSeqNumsForReqs(txn, &txnPut, &txnPut2) origTxn := txn.Clone() - batch, _, _, _, pErr := tc.repl.evaluateWriteBatch(ctx, makeIDKey(), &ba, allSpansGuard(), nil, uncertainty.Interval{}) + batch, _, _, _, pErr := tc.repl.evaluateWriteBatch(ctx, makeIDKey(), ba, allSpansGuard(), nil, uncertainty.Interval{}) defer batch.Close() if pErr != nil { t.Fatal(pErr) @@ -9324,7 +9324,7 @@ func TestNoopRequestsNotProposed(t *testing.T) { sendReq := func( ctx context.Context, repl *Replica, req roachpb.Request, txn *roachpb.Transaction, ) *roachpb.Error { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header.RangeID = repl.RangeID ba.Add(req) ba.Txn = txn @@ -9518,7 +9518,7 @@ func TestNoopRequestsNotProposed(t *testing.T) { } repl.mu.Unlock() - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} 
ba.Timestamp = markerTS ba.RangeID = repl.RangeID if c.useTxn { @@ -9617,7 +9617,7 @@ func TestErrorInRaftApplicationClearsIntents(t *testing.T) { txn.Sequence++ etArgs, _ := endTxnArgs(txn, true /* commit */) etArgs.LockSpans = []roachpb.Span{{Key: roachpb.Key("bb")}} - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header.Txn = txn ba.Add(&etArgs) assignSeqNumsForReqs(txn, &etArgs) @@ -9636,7 +9636,7 @@ func TestErrorInRaftApplicationClearsIntents(t *testing.T) { exLease, _ := repl.GetLease() st := kvserverpb.LeaseStatus{Lease: exLease, State: kvserverpb.LeaseState_VALID} _, tok := repl.mu.proposalBuf.TrackEvaluatingRequest(ctx, hlc.MinTimestamp) - ch, _, _, _, pErr := repl.evalAndPropose(ctx, &ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) + ch, _, _, _, pErr := repl.evalAndPropose(ctx, ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) if pErr != nil { t.Fatal(pErr) } @@ -9674,7 +9674,7 @@ func TestProposeWithAsyncConsensus(t *testing.T) { tc.StartWithStoreConfig(ctx, t, stopper, tsc) repl := tc.repl - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} key := roachpb.Key("a") put := putArgs(key, []byte("val")) ba.Add(&put) @@ -9684,7 +9684,7 @@ func TestProposeWithAsyncConsensus(t *testing.T) { atomic.StoreInt32(&filterActive, 1) st := tc.repl.CurrentLeaseStatus(ctx) _, tok := repl.mu.proposalBuf.TrackEvaluatingRequest(ctx, hlc.MinTimestamp) - ch, _, _, _, pErr := repl.evalAndPropose(ctx, &ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) + ch, _, _, _, pErr := repl.evalAndPropose(ctx, ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) if pErr != nil { t.Fatal(pErr) } @@ -9740,7 +9740,7 @@ func TestApplyPaginatedCommittedEntries(t *testing.T) { repl := tc.repl // Block command application then propose a command to Raft. 
- var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} key := roachpb.Key("a") put := putArgs(key, []byte("val")) ba.Add(&put) @@ -9749,7 +9749,7 @@ func TestApplyPaginatedCommittedEntries(t *testing.T) { atomic.StoreInt32(&filterActive, 1) st := repl.CurrentLeaseStatus(ctx) _, tok := repl.mu.proposalBuf.TrackEvaluatingRequest(ctx, hlc.MinTimestamp) - _, _, _, _, pErr := repl.evalAndPropose(ctx, &ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) + _, _, _, _, pErr := repl.evalAndPropose(ctx, ba, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) if pErr != nil { t.Fatal(pErr) } @@ -9760,7 +9760,7 @@ func TestApplyPaginatedCommittedEntries(t *testing.T) { <-blockingRaftApplication var ch chan proposalResult for i := 0; i < 50; i++ { - var ba2 roachpb.BatchRequest + ba2 := &roachpb.BatchRequest{} key := roachpb.Key("a") put := putArgs(key, make([]byte, 2*tsc.RaftMaxCommittedSizePerReady)) ba2.Add(&put) @@ -9768,7 +9768,7 @@ func TestApplyPaginatedCommittedEntries(t *testing.T) { var pErr *roachpb.Error _, tok := repl.mu.proposalBuf.TrackEvaluatingRequest(ctx, hlc.MinTimestamp) - ch, _, _, _, pErr = repl.evalAndPropose(ctx, &ba2, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) + ch, _, _, _, pErr = repl.evalAndPropose(ctx, ba2, allSpansGuard(), &st, uncertainty.Interval{}, tok.Move(ctx)) if pErr != nil { t.Fatal(pErr) } @@ -10371,7 +10371,7 @@ func TestConsistenctQueueErrorFromCheckConsistency(t *testing.T) { cfg := TestStoreConfig(nil) cfg.TestingKnobs = StoreTestingKnobs{ - TestingRequestFilter: func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if _, ok := ba.GetArg(roachpb.ComputeChecksum); ok { return roachpb.NewErrorf("boom") } @@ -10421,7 +10421,7 @@ func TestReplicaServersideRefreshes(t *testing.T) { ) return &txn } - send := func(ba roachpb.BatchRequest) (hlc.Timestamp, error) { + send := func(ba *roachpb.BatchRequest) (hlc.Timestamp, error) { br, pErr := tc.Sender().Send(ctx, ba) if pErr != nil { return hlc.Timestamp{}, pErr.GoError() @@ -10443,13 +10443,13 @@ func TestReplicaServersideRefreshes(t *testing.T) { return br.Timestamp, nil } get := func(key string) (hlc.Timestamp, error) { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} get := getArgs(roachpb.Key(key)) ba.Add(&get) return send(ba) } put := func(key, val string) (hlc.Timestamp, error) { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} put := putArgs(roachpb.Key(key), []byte(val)) ba.Add(&put) return send(ba) @@ -10458,7 +10458,7 @@ func TestReplicaServersideRefreshes(t *testing.T) { testCases := []struct { name string setupFn func() (hlc.Timestamp, error) // returns expected batch execution timestamp - batchFn func(hlc.Timestamp) (roachpb.BatchRequest, hlc.Timestamp) + batchFn func(hlc.Timestamp) (*roachpb.BatchRequest, hlc.Timestamp) expErr string }{ { @@ -10466,7 +10466,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("a", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Timestamp = ts.Prev() expTS = ts.Next() put := putArgs(roachpb.Key("a"), []byte("put2")) @@ -10483,7 +10484,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { _, _ = put("b", "put1") return put("b", "put2") }, - batchFn: func(ts hlc.Timestamp) (ba 
roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Timestamp = ts.Prev() expTS = ts.Next() cput := cPutArgs(roachpb.Key("b"), []byte("cput"), []byte("put2")) @@ -10500,7 +10502,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { _, _ = put("b-iput", "put1") return put("b-iput", "put2") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Timestamp = ts.Prev() expTS = ts.Next() iput := iPutArgs(roachpb.Key("b-iput"), []byte("put2")) @@ -10519,7 +10522,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("a", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Timestamp = ts.Prev() get := getArgs(roachpb.Key("a")) put := putArgs(roachpb.Key("a"), []byte("put2")) @@ -10533,7 +10537,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return get("a") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Timestamp = ts.Prev() expTS = ts.Next() put := putArgs(roachpb.Key("a"), []byte("put2")) @@ -10548,7 +10553,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { _, _ = put("c-cput", "put") return put("c-cput", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("c-cput", ts.Prev()) cput := cPutArgs(roachpb.Key("c-cput"), []byte("iput"), []byte("put")) ba.Add(&cput) @@ -10563,7 +10569,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("c-iput", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("c-iput", ts.Prev()) iput := iPutArgs(roachpb.Key("c-iput"), []byte("iput")) ba.Add(&iput) @@ -10578,7 +10585,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("c-scan", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("c-scan", ts.Prev()) scan := scanArgs(roachpb.Key("c-scan"), roachpb.Key("c-scan\x00")) scan.KeyLocking = lock.Exclusive @@ -10595,8 +10603,9 @@ func TestReplicaServersideRefreshes(t *testing.T) { _, _ = put("c-cput", "put") return put("c-cput", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { expTS = ts.Next() + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("c-cput", ts.Prev()) ba.CanForwardReadTimestamp = true cput := cPutArgs(roachpb.Key("c-cput"), []byte("iput"), []byte("put")) @@ -10619,7 +10628,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { _, _ = 
put("c-iput", "put1") return put("c-iput", "put2") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("c-iput", ts.Prev()) ba.CanForwardReadTimestamp = true iput := iPutArgs(roachpb.Key("c-iput"), []byte("put2")) @@ -10636,8 +10646,9 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("c-scan", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { expTS = ts.Next() + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("c-scan", ts.Prev()) ba.CanForwardReadTimestamp = true scan := scanArgs(roachpb.Key("c-scan"), roachpb.Key("c-scan\x00")) @@ -10654,7 +10665,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { _, _ = put("d", "put") return put("d", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("d", ts.Prev()) cput := cPutArgs(ba.Txn.Key, []byte("cput"), []byte("put")) et, _ := endTxnArgs(ba.Txn, true /* commit */) @@ -10671,8 +10683,9 @@ func TestReplicaServersideRefreshes(t *testing.T) { _, _ = put("e", "put") return put("e", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { expTS = ts.Next() + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("e", ts.Prev()) ba.CanForwardReadTimestamp = true // necessary to indicate serverside-refresh is possible cput := cPutArgs(ba.Txn.Key, []byte("cput"), []byte("put")) @@ -10689,8 +10702,9 @@ func TestReplicaServersideRefreshes(t *testing.T) { _, _ = put("e", "put") return put("e", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { expTS = ts.Next() + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("e", ts.Prev()) ba.CanForwardReadTimestamp = true // necessary to indicate serverside-refresh is possible cput := cPutArgs(ba.Txn.Key, []byte("cput"), []byte("put")) @@ -10711,10 +10725,11 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("e1", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { txn := newTxn("e1", ts.Prev()) // Send write to another key first to avoid 1PC. + ba = &roachpb.BatchRequest{} ba.Txn = txn put := putArgs([]byte("e1-other-key"), []byte("otherput")) ba.Add(&put) @@ -10723,7 +10738,7 @@ func TestReplicaServersideRefreshes(t *testing.T) { panic(err) } - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Txn = txn // Indicate local retry is possible, even though we don't currently take // advantage of this. @@ -10753,10 +10768,11 @@ func TestReplicaServersideRefreshes(t *testing.T) { } return put("f3", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { expTS = ts.Next() // We're going to execute before any of the writes in setupFn. 
ts.Logical = 0 + ba = &roachpb.BatchRequest{} ba.Timestamp = ts for i := 1; i <= 3; i++ { cput := cPutArgs(roachpb.Key(fmt.Sprintf("f%d", i)), []byte("cput"), []byte("put")) @@ -10780,10 +10796,11 @@ func TestReplicaServersideRefreshes(t *testing.T) { } return put("ga3", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { expTS = ts.Next() // We're going to execute before any of the writes in setupFn. ts.Logical = 0 + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("ga1", ts) ba.CanForwardReadTimestamp = true // necessary to indicate serverside-refresh is possible for i := 1; i <= 3; i++ { @@ -10806,9 +10823,10 @@ func TestReplicaServersideRefreshes(t *testing.T) { } return get("h") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { txn := newTxn("h", ts.Prev()) // Send write to another key first to avoid 1PC. + ba = &roachpb.BatchRequest{} ba.Txn = txn put := putArgs([]byte("h2"), []byte("otherput")) ba.Add(&put) @@ -10818,7 +10836,7 @@ func TestReplicaServersideRefreshes(t *testing.T) { } // Send the remainder of the transaction in another batch. expTS = ts.Next() - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Txn = txn ba.CanForwardReadTimestamp = true // necessary to indicate serverside-refresh is possible cput := cPutArgs(ba.Txn.Key, []byte("cput"), []byte("put")) @@ -10836,7 +10854,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return get("a") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("a", ts.Prev()) ba.CanForwardReadTimestamp = true // necessary to indicate serverside-refresh is possible expTS = ts.Next() @@ -10856,9 +10875,10 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("lscan", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { // Txn with (read_ts, write_ts) = (1, 4) finds a value with // `ts = 2`. Final timestamp should be `ts = 4`. + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("lscan", ts.Prev()) ba.Txn.WriteTimestamp = ts.Next().Next() ba.CanForwardReadTimestamp = true @@ -10877,9 +10897,10 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("i", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { txn := newTxn("i", ts.Prev()) // Send write to another key first to avoid 1PC. + ba = &roachpb.BatchRequest{} ba.Txn = txn put1 := putArgs([]byte("i2"), []byte("otherput")) ba.Add(&put1) @@ -10889,7 +10910,7 @@ func TestReplicaServersideRefreshes(t *testing.T) { } // Send the remainder of the transaction in another batch. 
expTS = ts.Next() - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} ba.Txn = txn ba.CanForwardReadTimestamp = true // necessary to indicate serverside-refresh is possible put2 := putArgs(ba.Txn.Key, []byte("newput")) @@ -10910,7 +10931,8 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("a", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { + ba = &roachpb.BatchRequest{} ba.Timestamp = ts.Prev() // NOTE: set the TimestampFromServerClock field manually. This is // usually set on the server for non-transactional requests without @@ -10931,9 +10953,10 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("a", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { expTS = ts.Next() ts = ts.Prev() + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("a", ts) ba.Txn.GlobalUncertaintyLimit = expTS ba.CanForwardReadTimestamp = true // necessary to indicate serverside-refresh is possible @@ -10947,8 +10970,9 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("a", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { ts = ts.Prev() + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("a", ts) ba.Txn.GlobalUncertaintyLimit = ts.Next() get := getArgs(roachpb.Key("a")) @@ -10962,9 +10986,10 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("a", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { expTS = ts.Next() ts = ts.Prev() + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("a", ts) ba.Txn.GlobalUncertaintyLimit = expTS ba.CanForwardReadTimestamp = true // necessary to indicate serverside-refresh is possible @@ -10980,8 +11005,9 @@ func TestReplicaServersideRefreshes(t *testing.T) { setupFn: func() (hlc.Timestamp, error) { return put("a", "put") }, - batchFn: func(ts hlc.Timestamp) (ba roachpb.BatchRequest, expTS hlc.Timestamp) { + batchFn: func(ts hlc.Timestamp) (ba *roachpb.BatchRequest, expTS hlc.Timestamp) { ts = ts.Prev() + ba = &roachpb.BatchRequest{} ba.Txn = newTxn("a", ts) ba.Txn.GlobalUncertaintyLimit = ts.Next() get := getArgs(roachpb.Key("a")) @@ -11059,7 +11085,7 @@ func TestReplicaPushed1PC(t *testing.T) { // this difference is difficult to observe in a test. If we had // more detailed metrics we could assert that the 1PC path was // not even attempted here. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: &txn} put := putArgs(k, []byte("two")) et, _ := endTxnArgs(&txn, true) @@ -11102,7 +11128,7 @@ func TestReplicaNotifyLockTableOn1PC(t *testing.T) { // Create a new transaction and perform a "for update" scan. This should // acquire unreplicated, exclusive locks on the key. 
txn := newTransaction("test", key, 1, tc.Clock()) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn} ba.Add(roachpb.NewScan(key, key.Next(), true /* forUpdate */)) if _, pErr := tc.Sender().Send(ctx, ba); pErr != nil { @@ -11134,7 +11160,7 @@ func TestReplicaNotifyLockTableOn1PC(t *testing.T) { // Update the locked value and commit in a single batch. This should release // the "for update" lock. - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} incArgs := incrementArgs(key, 1) et, etH := endTxnArgs(txn, true /* commit */) et.Require1PC = true @@ -11183,7 +11209,7 @@ func TestReplicaQueryLocks(t *testing.T) { // Create a new transaction and perform a "for update" scan. This should // acquire unreplicated, exclusive locks on keys "a" and "b". txn := newTransaction("test", keyA, 1, tc.Clock()) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header = roachpb.Header{Txn: txn} ba.Add(roachpb.NewScan(keyA, keyB.Next(), true /* forUpdate */)) if _, pErr := tc.Sender().Send(ctx, ba); pErr != nil { @@ -11269,7 +11295,7 @@ func TestReplicaQueryLocks(t *testing.T) { // Update the locked value and commit in a single batch. This should release // the "for update" lock. - ba = roachpb.BatchRequest{} + ba = &roachpb.BatchRequest{} incArgs := incrementArgs(keyA, 1) et, etH := endTxnArgs(txn, true /* commit */) et.Require1PC = true @@ -13207,7 +13233,7 @@ func TestProposalNotAcknowledgedOrReproposedAfterApplication(t *testing.T) { st := tc.repl.CurrentLeaseStatus(ctx) txn := newTransaction("test", key, roachpb.NormalUserPriority, tc.Clock()) txnID = txn.ID - ba := roachpb.BatchRequest{ + ba := &roachpb.BatchRequest{ Header: roachpb.Header{ RangeID: tc.repl.RangeID, Txn: txn, @@ -13227,7 +13253,7 @@ func TestProposalNotAcknowledgedOrReproposedAfterApplication(t *testing.T) { _, tok := tc.repl.mu.proposalBuf.TrackEvaluatingRequest(ctx, hlc.MinTimestamp) sp := cfg.AmbientCtx.Tracer.StartSpan("replica send", tracing.WithForceRealSpan()) tracedCtx := tracing.ContextWithSpan(ctx, sp) - ch, _, _, _, pErr := tc.repl.evalAndPropose(tracedCtx, &ba, allSpansGuard(), &st, uncertainty.Interval{}, tok) + ch, _, _, _, pErr := tc.repl.evalAndPropose(tracedCtx, ba, allSpansGuard(), &st, uncertainty.Interval{}, tok) if pErr != nil { t.Fatal(pErr) } @@ -13305,34 +13331,34 @@ func TestReplicaTelemetryCounterForPushesDueToClosedTimestamp(t *testing.T) { { // Test the case where no bump occurs. name: "no bump", f: func(t *testing.T, r *Replica) { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Add(putReq(keyA)) minReadTS := r.store.Clock().Now() ba.Timestamp = minReadTS.Next() - require.False(t, r.applyTimestampCache(ctx, &ba, minReadTS)) + require.False(t, r.applyTimestampCache(ctx, ba, minReadTS)) require.Equal(t, int32(0), telemetry.Read(batchesPushedDueToClosedTimestamp)) }, }, { // Test the case where the bump occurs due to minReadTS. name: "bump due to minTS", f: func(t *testing.T, r *Replica) { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Add(putReq(keyA)) ba.Timestamp = r.store.Clock().Now() minReadTS := ba.Timestamp.Next() - require.True(t, r.applyTimestampCache(ctx, &ba, minReadTS)) + require.True(t, r.applyTimestampCache(ctx, ba, minReadTS)) require.Equal(t, int32(1), telemetry.Read(batchesPushedDueToClosedTimestamp)) }, }, { // Test the case where we bump due to the read ts cache rather than the minReadTS. 
name: "bump due to later read ts cache entry", f: func(t *testing.T, r *Replica) { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Add(putReq(keyA)) ba.Timestamp = r.store.Clock().Now() minReadTS := ba.Timestamp.Next() r.store.tsCache.Add(keyA, keyA, minReadTS.Next(), uuid.MakeV4()) - require.True(t, r.applyTimestampCache(ctx, &ba, minReadTS)) + require.True(t, r.applyTimestampCache(ctx, ba, minReadTS)) require.Equal(t, int32(0), telemetry.Read(batchesPushedDueToClosedTimestamp)) }, }, @@ -13340,14 +13366,14 @@ func TestReplicaTelemetryCounterForPushesDueToClosedTimestamp(t *testing.T) { // Test the case where we do initially bump due to the minReadTS but then // bump again to a higher ts due to the read ts cache. name: "higher bump due to read ts cache entry", f: func(t *testing.T, r *Replica) { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Add(putReq(keyA)) ba.Add(putReq(keyAA)) ba.Timestamp = r.store.Clock().Now() minReadTS := ba.Timestamp.Next() t.Log(ba.Timestamp, minReadTS, minReadTS.Next()) r.store.tsCache.Add(keyAA, keyAA, minReadTS.Next(), uuid.MakeV4()) - require.True(t, r.applyTimestampCache(ctx, &ba, minReadTS)) + require.True(t, r.applyTimestampCache(ctx, ba, minReadTS)) require.Equal(t, int32(0), telemetry.Read(batchesPushedDueToClosedTimestamp)) }, }, @@ -13391,12 +13417,12 @@ func TestContainsEstimatesClampProposal(t *testing.T) { someRequestToProposal := func(tc *testContext, ctx context.Context) *ProposalData { cmdIDKey := kvserverbase.CmdIDKey("some-cmdid-key") - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Timestamp = tc.Clock().Now() req := putArgs(roachpb.Key("some-key"), []byte("some-value")) ba.Add(&req) st := tc.repl.CurrentLeaseStatus(ctx) - proposal, err := tc.repl.requestToProposal(ctx, cmdIDKey, &ba, allSpansGuard(), &st, uncertainty.Interval{}) + proposal, err := tc.repl.requestToProposal(ctx, cmdIDKey, ba, allSpansGuard(), &st, uncertainty.Interval{}) if err != nil { t.Error(err) } @@ -13678,7 +13704,7 @@ func TestRangeInfoReturned(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Add(&gArgs) ba.Header.ClientRangeInfo = test.req br, pErr := tc.Sender().Send(ctx, ba) @@ -13869,7 +13895,7 @@ func TestRangeSplitRacesWithRead(t *testing.T) { require.NoError(t, err) testutils.SucceedsSoon(t, func() error { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = desc.RangeID ba.ReadConsistency = roachpb.INCONSISTENT ba.Add(&roachpb.QueryResolvedTimestampRequest{ @@ -14010,7 +14036,7 @@ func TestRangeSplitAndRHSRemovalRacesWithFollowerRead(t *testing.T) { require.NoError(t, err) testutils.SucceedsSoon(t, func() error { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.RangeID = desc.RangeID ba.ReadConsistency = roachpb.INCONSISTENT ba.Add(&roachpb.QueryResolvedTimestampRequest{ diff --git a/pkg/kv/kvserver/store_send.go b/pkg/kv/kvserver/store_send.go index 23b0a8d181b0..8c1f7fe804b7 100644 --- a/pkg/kv/kvserver/store_send.go +++ b/pkg/kv/kvserver/store_send.go @@ -43,7 +43,7 @@ import ( // of one of its writes), the response will have a transaction set which should // be used to update the client transaction object. 
func (s *Store) Send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (br *roachpb.BatchResponse, pErr *roachpb.Error) { var writeBytes *StoreWriteBytes br, writeBytes, pErr = s.SendWithWriteBytes(ctx, ba) @@ -54,7 +54,7 @@ func (s *Store) Send( // SendWithWriteBytes is the implementation of Send with an additional // *StoreWriteBytes return value. func (s *Store) SendWithWriteBytes( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (br *roachpb.BatchResponse, writeBytes *StoreWriteBytes, pErr *roachpb.Error) { // Attach any log tags from the store to the context (which normally // comes from gRPC). @@ -329,7 +329,7 @@ func (s *Store) SendWithWriteBytes( // before a request acquires latches on a range. Otherwise, the request could // inadvertently block others while being throttled. func (s *Store) maybeThrottleBatch( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (limit.Reservation, error) { if !ba.IsSingleRequest() { return nil, nil @@ -415,8 +415,8 @@ func (s *Store) maybeThrottleBatch( // For more information, see the "Server-side negotiation fast-path" section of // docs/RFCS/20210519_bounded_staleness_reads.md. func (s *Store) executeServerSideBoundedStalenessNegotiation( - ctx context.Context, ba roachpb.BatchRequest, -) (roachpb.BatchRequest, *roachpb.Error) { + ctx context.Context, ba *roachpb.BatchRequest, +) (*roachpb.BatchRequest, *roachpb.Error) { if ba.BoundedStaleness == nil { log.Fatal(ctx, "BoundedStaleness header required for server-side negotiation fast-path") } @@ -440,7 +440,7 @@ func (s *Store) executeServerSideBoundedStalenessNegotiation( // Use one or more QueryResolvedTimestampRequests to compute a resolved // timestamp over the read spans on the local replica. 
- var queryResBa roachpb.BatchRequest + queryResBa := &roachpb.BatchRequest{} queryResBa.RangeID = ba.RangeID queryResBa.Replica = ba.Replica queryResBa.ClientRangeInfo = ba.ClientRangeInfo @@ -494,6 +494,7 @@ func (s *Store) executeServerSideBoundedStalenessNegotiation( resTS = cfg.MaxTimestampBound.Prev() } + ba = ba.ShallowCopy() ba.Timestamp = resTS ba.BoundedStaleness = nil return ba, nil diff --git a/pkg/kv/kvserver/store_test.go b/pkg/kv/kvserver/store_test.go index 098bbfd6ccd2..d7c2046a5c3b 100644 --- a/pkg/kv/kvserver/store_test.go +++ b/pkg/kv/kvserver/store_test.go @@ -76,7 +76,7 @@ var testIdent = roachpb.StoreIdent{ } func (s *Store) TestSender() kv.Sender { - return kv.Wrap(s, func(ba roachpb.BatchRequest) roachpb.BatchRequest { + return kv.Wrap(s, func(ba *roachpb.BatchRequest) *roachpb.BatchRequest { if ba.RangeID != 0 { return ba } @@ -1010,7 +1010,7 @@ func TestStoreAnnotateNow(t *testing.T) { txn.GlobalUncertaintyLimit = hlc.MaxTimestamp assignSeqNumsForReqs(txn, &pArgs) } - ba := roachpb.BatchRequest{ + ba := &roachpb.BatchRequest{ Header: roachpb.Header{ Txn: txn, Replica: desc, @@ -1116,7 +1116,7 @@ func TestStoreSendWithZeroTime(t *testing.T) { store, _ := createTestStore(ctx, t, testStoreOpts{createSystemRanges: true}, stopper) args := getArgs([]byte("a")) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add(&args) br, pErr := store.TestSender().Send(ctx, ba) if pErr != nil { @@ -1983,7 +1983,7 @@ func TestStoreScanResumeTSCache(t *testing.T) { t3 := timeutil.Unix(4, 0) manualClock.MustAdvanceTo(t3) h.Timestamp = makeTS(t3.UnixNano(), 0) - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Header = h ba.Add(getArgsString("a"), getArgsString("b"), getArgsString("c")) br, pErr := store.TestSender().Send(ctx, ba) @@ -2053,7 +2053,7 @@ func TestStoreSkipLockedTSCache(t *testing.T) { // Read the span at t2 using a SkipLocked wait policy. t2 := timeutil.Unix(3, 0) manualClock.MustAdvanceTo(t2) - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Timestamp = makeTS(t2.UnixNano(), 0) ba.WaitPolicy = lock.WaitPolicy_SkipLocked ba.Add(tc.reqs...) @@ -2338,7 +2338,7 @@ func TestStoreScanMultipleIntents(t *testing.T) { key1 := roachpb.Key("key00") key10 := roachpb.Key("key09") txn := newTransaction("test", key1, 1, store.cfg.Clock) - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} for i := 0; i < 10; i++ { pArgs := putArgs(roachpb.Key(fmt.Sprintf("key%02d", i)), []byte("value")) ba.Add(&pArgs) diff --git a/pkg/kv/kvserver/stores.go b/pkg/kv/kvserver/stores.go index 60cdd1b6d8fc..94d988a74add 100644 --- a/pkg/kv/kvserver/stores.go +++ b/pkg/kv/kvserver/stores.go @@ -180,7 +180,7 @@ func (ls *Stores) GetReplicaForRangeID( // Send implements the client.Sender interface. The store is looked up from the // store map using the ID specified in the request. func (ls *Stores) Send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { br, writeBytes, pErr := ls.SendWithWriteBytes(ctx, ba) writeBytes.Release() @@ -213,7 +213,7 @@ func (wb *StoreWriteBytes) Release() { // SendWithWriteBytes is the implementation of Send with an additional // *StoreWriteBytes return value. 
func (ls *Stores) SendWithWriteBytes( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *StoreWriteBytes, *roachpb.Error) { if err := ba.ValidateForEvaluation(); err != nil { log.Fatalf(ctx, "invalid batch (%s): %s", ba, err) diff --git a/pkg/kv/kvserver/txnrecovery/manager_test.go b/pkg/kv/kvserver/txnrecovery/manager_test.go index b0ab0e4f4465..f3a7f0d40d12 100644 --- a/pkg/kv/kvserver/txnrecovery/manager_test.go +++ b/pkg/kv/kvserver/txnrecovery/manager_test.go @@ -30,7 +30,7 @@ func makeManager(s *kv.Sender) (Manager, *hlc.Clock, *stop.Stopper) { clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */) stopper := stop.NewStopper() db := kv.NewDB(ac, kv.NonTransactionalFactoryFunc(func( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return (*s).Send(ctx, ba) }), clock, stopper) @@ -91,7 +91,7 @@ func TestResolveIndeterminateCommit(t *testing.T) { } mockSender = kv.SenderFunc(func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Probing Phase. assertMetrics(t, m, metricVals{attemptsPending: 1, attempts: 1}) @@ -111,7 +111,7 @@ func TestResolveIndeterminateCommit(t *testing.T) { br.Responses[2].GetInner().(*roachpb.QueryIntentResponse).FoundIntent = !prevent mockSender = kv.SenderFunc(func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Recovery Phase. assertMetrics(t, m, metricVals{attemptsPending: 1, attempts: 1}) @@ -268,7 +268,7 @@ func TestResolveIndeterminateCommitTxnChanges(t *testing.T) { for _, c := range testCases { t.Run(c.name, func(t *testing.T) { mockSender = kv.SenderFunc(func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Probing Phase. assertMetrics(t, m, expMetrics.merge(metricVals{attemptsPending: 1, attempts: 1})) @@ -292,7 +292,7 @@ func TestResolveIndeterminateCommitTxnChanges(t *testing.T) { br.Responses[2].GetInner().(*roachpb.QueryIntentResponse).FoundIntent = false mockSender = kv.SenderFunc(func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Recovery Phase. assert.False(t, c.duringProbing, "the recovery phase should not be run") @@ -342,7 +342,7 @@ func TestResolveIndeterminateCommitTxnWithoutInFlightWrites(t *testing.T) { txn := makeStagingTransaction(clock) mockSender = kv.SenderFunc(func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Recovery Phase. Probing phase skipped. 
assert.Equal(t, 1, len(ba.Requests)) diff --git a/pkg/kv/kvserver/txnwait/queue_test.go b/pkg/kv/kvserver/txnwait/queue_test.go index 3bc3caa3d008..131bdef457e2 100644 --- a/pkg/kv/kvserver/txnwait/queue_test.go +++ b/pkg/kv/kvserver/txnwait/queue_test.go @@ -190,7 +190,7 @@ func TestMaybeWaitForPushWithContextCancellation(t *testing.T) { var mockSender kv.SenderFunc cfg := makeConfig(func( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return mockSender(ctx, ba) }, stopper) @@ -203,7 +203,7 @@ func TestMaybeWaitForPushWithContextCancellation(t *testing.T) { // Mock out responses to any QueryTxn requests. mockSender = func( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() resp := br.Responses[0].GetInner().(*roachpb.QueryTxnResponse) @@ -277,7 +277,7 @@ func TestPushersReleasedAfterAnyQueryTxnFindsAbortedTxn(t *testing.T) { defer stopper.Stop(context.Background()) var mockSender kv.SenderFunc cfg := makeConfig(func( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return mockSender(ctx, ba) }, stopper) @@ -295,7 +295,7 @@ func TestPushersReleasedAfterAnyQueryTxnFindsAbortedTxn(t *testing.T) { const numPushees = 3 var queryTxnCount int32 mockSender = func( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() resp := br.Responses[0].GetInner().(*roachpb.QueryTxnResponse) diff --git a/pkg/kv/mock_transactional_sender.go b/pkg/kv/mock_transactional_sender.go index d807546ad2a8..8d58b93486dd 100644 --- a/pkg/kv/mock_transactional_sender.go +++ b/pkg/kv/mock_transactional_sender.go @@ -21,7 +21,7 @@ import ( // MockTransactionalSender allows a function to be used as a TxnSender. type MockTransactionalSender struct { senderFunc func( - context.Context, *roachpb.Transaction, roachpb.BatchRequest, + context.Context, *roachpb.Transaction, *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) txn roachpb.Transaction } @@ -30,7 +30,7 @@ type MockTransactionalSender struct { // The passed in txn is cloned. func NewMockTransactionalSender( f func( - context.Context, *roachpb.Transaction, roachpb.BatchRequest, + context.Context, *roachpb.Transaction, *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error), txn *roachpb.Transaction, ) *MockTransactionalSender { @@ -39,7 +39,7 @@ func NewMockTransactionalSender( // Send is part of the TxnSender interface. func (m *MockTransactionalSender) Send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return m.senderFunc(ctx, &m.txn, ba) } @@ -237,7 +237,7 @@ func (m *MockTransactionalSender) HasPerformedWrites() bool { // MockTxnSenderFactory is a TxnSenderFactory producing MockTxnSenders. type MockTxnSenderFactory struct { - senderFunc func(context.Context, *roachpb.Transaction, roachpb.BatchRequest) ( + senderFunc func(context.Context, *roachpb.Transaction, *roachpb.BatchRequest) ( *roachpb.BatchResponse, *roachpb.Error) nonTxnSenderFunc Sender } @@ -249,7 +249,7 @@ var _ TxnSenderFactory = MockTxnSenderFactory{} // function is responsible for putting the txn inside the batch, if needed. 
func MakeMockTxnSenderFactory( senderFunc func( - context.Context, *roachpb.Transaction, roachpb.BatchRequest, + context.Context, *roachpb.Transaction, *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error), ) MockTxnSenderFactory { return MockTxnSenderFactory{ @@ -262,7 +262,7 @@ func MakeMockTxnSenderFactory( // requests. func MakeMockTxnSenderFactoryWithNonTxnSender( senderFunc func( - context.Context, *roachpb.Transaction, roachpb.BatchRequest, + context.Context, *roachpb.Transaction, *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error), nonTxnSenderFunc SenderFunc, ) MockTxnSenderFactory { diff --git a/pkg/kv/range_lookup.go b/pkg/kv/range_lookup.go index b4a618d37827..39ee80322ae7 100644 --- a/pkg/kv/range_lookup.go +++ b/pkg/kv/range_lookup.go @@ -275,7 +275,7 @@ func lookupRangeFwdScan( return nil, nil, errors.Wrap(err, "could not create scan bounds for range lookup") } - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.ReadConsistency = rc if prefetchReverse { // Even if we're prefetching in the reverse direction, we still scan @@ -368,7 +368,7 @@ func lookupRangeRevScan( return nil, nil, errors.Wrap(err, "could not create scan bounds for reverse range lookup") } - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.ReadConsistency = rc ba.MaxSpanRequestKeys = maxKeys ba.Add(&roachpb.ReverseScanRequest{ @@ -430,7 +430,7 @@ func kvsToRangeDescriptors(kvs []roachpb.KeyValue) ([]roachpb.RangeDescriptor, e // TestingIsRangeLookup returns if the provided BatchRequest looks like a single // RangeLookup scan. It can return false positives and should only be used in // tests. -func TestingIsRangeLookup(ba roachpb.BatchRequest) bool { +func TestingIsRangeLookup(ba *roachpb.BatchRequest) bool { if ba.IsSingleRequest() { return TestingIsRangeLookupRequest(ba.Requests[0].GetInner()) } diff --git a/pkg/kv/range_lookup_test.go b/pkg/kv/range_lookup_test.go index a5cf9056c399..7c678bcfd199 100644 --- a/pkg/kv/range_lookup_test.go +++ b/pkg/kv/range_lookup_test.go @@ -50,7 +50,7 @@ func TestRangeLookupRaceSplits(t *testing.T) { } lookupKey := roachpb.Key("k") - assertRangeLookupScan := func(ba roachpb.BatchRequest) { + assertRangeLookupScan := func(ba *roachpb.BatchRequest) { if len(ba.Requests) != 1 { t.Fatalf("expected single request, found %v", ba) } @@ -79,7 +79,7 @@ func TestRangeLookupRaceSplits(t *testing.T) { goodRes := newScanRespFromRangeDescriptors(&desc1AfterSplit) attempt := 0 - sender := SenderFunc(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender := SenderFunc(func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { // Increment the attempt counter after each attempt. defer func() { attempt++ @@ -137,7 +137,7 @@ func TestRangeLookupRaceSplits(t *testing.T) { } attempt := 0 - sender := SenderFunc(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + sender := SenderFunc(func(_ context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { // Increment the attempt counter after each attempt. defer func() { attempt++ diff --git a/pkg/kv/sender.go b/pkg/kv/sender.go index 25450f472284..f5f05bc16d06 100644 --- a/pkg/kv/sender.go +++ b/pkg/kv/sender.go @@ -63,7 +63,7 @@ type Sender interface { // concurrent requests, it waits for all of them before returning, // even in error cases. 
// - // Once the request reaches the `transport` module, anothern + // Once the request reaches the `transport` module, another // restriction applies (particularly relevant for the case when the // node that the transport is talking to is local, and so there's // not gRPC marshaling/unmarshaling): @@ -86,7 +86,7 @@ type Sender interface { // about what the client should update, as opposed to a full txn // that the client is expected to diff with its copy and apply all // the updates. - Send(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) + Send(context.Context, *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) } // TxnSender is the interface used to call into a CockroachDB instance @@ -394,11 +394,11 @@ type TxnSenderFactory interface { // SenderFunc is an adapter to allow the use of ordinary functions as // Senders. -type SenderFunc func(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) +type SenderFunc func(context.Context, *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) // Send calls f(ctx, c). func (f SenderFunc) Send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return f(ctx, ba) } @@ -450,7 +450,7 @@ func SendWrappedWithAdmission( ah roachpb.AdmissionHeader, args roachpb.Request, ) (roachpb.Response, *roachpb.Error) { - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Header = h ba.AdmissionHeader = ah ba.Add(args) @@ -477,8 +477,8 @@ func SendWrapped( // Wrap returns a Sender which applies the given function before delegating to // the supplied Sender. -func Wrap(sender Sender, f func(roachpb.BatchRequest) roachpb.BatchRequest) Sender { - return SenderFunc(func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { +func Wrap(sender Sender, f func(*roachpb.BatchRequest) *roachpb.BatchRequest) Sender { + return SenderFunc(func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { return sender.Send(ctx, f(ba)) }) } diff --git a/pkg/kv/txn.go b/pkg/kv/txn.go index 990cc8eb0185..b675e60ddac9 100644 --- a/pkg/kv/txn.go +++ b/pkg/kv/txn.go @@ -679,7 +679,7 @@ func (txn *Txn) commit(ctx context.Context) error { // will be subject to admission control, and the zero CreateTime will give // it preference within the tenant. et := endTxnReq(true, txn.deadline()) - ba := roachpb.BatchRequest{Requests: et.unionArr[:]} + ba := &roachpb.BatchRequest{Requests: et.unionArr[:]} _, pErr := txn.Send(ctx, ba) if pErr == nil { for _, t := range txn.commitTriggers { @@ -853,7 +853,7 @@ func (txn *Txn) rollback(ctx context.Context) *roachpb.Error { // settings, it will be subject to admission control, and the zero // CreateTime will give it preference within the tenant. et := endTxnReq(false, hlc.Timestamp{} /* deadline */) - ba := roachpb.BatchRequest{Requests: et.unionArr[:]} + ba := &roachpb.BatchRequest{Requests: et.unionArr[:]} _, pErr := txn.Send(ctx, ba) if pErr == nil { return nil @@ -879,7 +879,7 @@ // settings, it will be subject to admission control, and the zero // CreateTime will give it preference within the tenant.
et := endTxnReq(false, hlc.Timestamp{} /* deadline */) - ba := roachpb.BatchRequest{Requests: et.unionArr[:]} + ba := &roachpb.BatchRequest{Requests: et.unionArr[:]} _ = contextutil.RunWithTimeout(ctx, "async txn rollback", asyncRollbackTimeout, func(ctx context.Context) error { if _, pErr := txn.Send(ctx, ba); pErr != nil { @@ -1060,7 +1060,7 @@ func (txn *Txn) IsRetryableErrMeantForTxn( // commit or clean-up explicitly even when that may not be required // (or even erroneous). Returns (nil, nil) for an empty batch. func (txn *Txn) Send( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { // Fill in the GatewayNodeID on the batch if the txn knows it. // NOTE(andrei): It seems a bit ugly that we're filling in the batches here as @@ -1152,7 +1152,7 @@ func (txn *Txn) handleRetryableErrLocked( // and perform the read. Callers can use this flexibility to trade off increased // staleness for reduced latency. func (txn *Txn) NegotiateAndSend( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if err := txn.checkNegotiateAndSendPreconditions(ctx, ba); err != nil { return nil, roachpb.NewError(err) @@ -1215,7 +1215,7 @@ func (txn *Txn) NegotiateAndSend( // checks preconditions on BatchRequest and Txn for NegotiateAndSend. func (txn *Txn) checkNegotiateAndSendPreconditions( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (err error) { assert := func(b bool, s string) { if !b { diff --git a/pkg/kv/txn_external_test.go b/pkg/kv/txn_external_test.go index 48ef5d103a2a..8824aedc224a 100644 --- a/pkg/kv/txn_external_test.go +++ b/pkg/kv/txn_external_test.go @@ -120,7 +120,7 @@ func TestRollbackAfterAmbiguousCommit(t *testing.T) { var key roachpb.Key commitBlocked := make(chan struct{}) onCommitReqFilter := func( - ba roachpb.BatchRequest, fn func(et *roachpb.EndTxnRequest) *roachpb.Error, + ba *roachpb.BatchRequest, fn func(et *roachpb.EndTxnRequest) *roachpb.Error, ) *roachpb.Error { if atomic.LoadInt64(&filterSet) == 0 { return nil @@ -135,7 +135,7 @@ func TestRollbackAfterAmbiguousCommit(t *testing.T) { } return nil } - blockCommitReqFilter := func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + blockCommitReqFilter := func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { return onCommitReqFilter(ba, func(et *roachpb.EndTxnRequest) *roachpb.Error { // Inform the test that the commit is blocked. commitBlocked <- struct{}{} @@ -146,7 +146,7 @@ func TestRollbackAfterAmbiguousCommit(t *testing.T) { return roachpb.NewError(ctx.Err()) }) } - addInFlightWriteToCommitReqFilter := func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + addInFlightWriteToCommitReqFilter := func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { return onCommitReqFilter(ba, func(et *roachpb.EndTxnRequest) *roachpb.Error { // Add a fake in-flight write. et.InFlightWrites = append(et.InFlightWrites, roachpb.SequencedWrite{ @@ -166,7 +166,7 @@ func TestRollbackAfterAmbiguousCommit(t *testing.T) { // do this either before or after the request completes, depending // on the status that the test wants the txn record to be in when // the rollback is performed. 
- TestingRequestFilter: func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if testCase.txnStatus == roachpb.PENDING { // Block and reject before the request writes the txn record. return blockCommitReqFilter(ctx, ba) @@ -180,7 +180,7 @@ func TestRollbackAfterAmbiguousCommit(t *testing.T) { } return nil }, - TestingResponseFilter: func(ctx context.Context, ba roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { + TestingResponseFilter: func(ctx context.Context, ba *roachpb.BatchRequest, _ *roachpb.BatchResponse) *roachpb.Error { if testCase.txnStatus != roachpb.PENDING { // Block and reject after the request writes the txn record. return blockCommitReqFilter(ctx, ba) @@ -427,7 +427,7 @@ func testTxnNegotiateAndSendDoesNotBlock(t *testing.T, multiRange, strict, route // error (WriteIntentError) under conditions that would otherwise // cause us to block on an intent. Otherwise, allow the request to be // redirected to the leaseholder and to block on intents. - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} if strict { ba.BoundedStaleness = &roachpb.BoundedStalenessHeader{ MinTimestampBound: minTSBound, diff --git a/pkg/kv/txn_test.go b/pkg/kv/txn_test.go index 9934ee252a95..ffcb560855cf 100644 --- a/pkg/kv/txn_test.go +++ b/pkg/kv/txn_test.go @@ -81,11 +81,11 @@ func TestTxnVerboseTrace(t *testing.T) { } func newTestTxnFactory( - createReply func(roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error), + createReply func(*roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error), ) TxnSenderFactory { return MakeMockTxnSenderFactory( func( - ctx context.Context, txn *roachpb.Transaction, ba roachpb.BatchRequest, + ctx context.Context, txn *roachpb.Transaction, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { if ba.UserPriority == 0 { ba.UserPriority = 1 @@ -143,7 +143,7 @@ func TestInitPut(t *testing.T) { // TODO(vivekmenezes): update test or remove when InitPut is being // considered sufficiently tested and this path exercised. 
clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */) - db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), newTestTxnFactory(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { br := ba.CreateReply() return br, nil }), clock, stopper) @@ -212,7 +212,7 @@ func TestCommitTransactionOnce(t *testing.T) { defer stopper.Stop(ctx) clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */) count := 0 - db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), newTestTxnFactory(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { count++ return ba.CreateReply(), nil }), clock, stopper) @@ -238,7 +238,7 @@ func TestAbortMutatingTransaction(t *testing.T) { defer stopper.Stop(ctx) clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */) var calls []roachpb.Method - db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), newTestTxnFactory(func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), newTestTxnFactory(func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { calls = append(calls, ba.Methods()...) if et, ok := ba.GetArg(roachpb.EndTxn); ok && et.(*roachpb.EndTxnRequest).Commit { t.Errorf("expected commit to be false") @@ -291,7 +291,7 @@ func TestRunTransactionRetryOnErrors(t *testing.T) { defer stopper.Stop(ctx) count := 0 db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), newTestTxnFactory( - func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if _, ok := ba.GetArg(roachpb.Put); ok { count++ @@ -416,7 +416,7 @@ func TestSetPriority(t *testing.T) { clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */) var expected roachpb.UserPriority db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), newTestTxnFactory( - func(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { + func(ba *roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) { if ba.UserPriority != expected { pErr := roachpb.NewErrorf("Priority not set correctly in the batch! 
"+ "(expected: %s, value: %s)", expected, ba.UserPriority) @@ -434,7 +434,7 @@ func TestSetPriority(t *testing.T) { if err := txn.SetUserPriority(expected); err != nil { t.Fatal(err) } - if _, pErr := txn.Send(ctx, roachpb.BatchRequest{}); pErr != nil { + if _, pErr := txn.Send(ctx, &roachpb.BatchRequest{}); pErr != nil { t.Fatal(pErr) } @@ -442,7 +442,7 @@ func TestSetPriority(t *testing.T) { expected = roachpb.UserPriority(-13) txn = NewTxn(ctx, db, 0 /* gatewayNodeID */) txn.TestingSetPriority(13) - if _, pErr := txn.Send(ctx, roachpb.BatchRequest{}); pErr != nil { + if _, pErr := txn.Send(ctx, &roachpb.BatchRequest{}); pErr != nil { t.Fatal(pErr) } } @@ -505,7 +505,7 @@ func TestUpdateDeadlineMaybe(t *testing.T) { clock := hlc.NewClock(timeutil.NewManualTime(timeutil.Unix(0, 1)), time.Nanosecond /* maxOffset */) db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), MakeMockTxnSenderFactory( - func(context.Context, *roachpb.Transaction, roachpb.BatchRequest, + func(context.Context, *roachpb.Transaction, *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return nil, nil }), clock, stopper) @@ -554,7 +554,7 @@ func TestTxnNegotiateAndSend(t *testing.T) { ts20 := hlc.Timestamp{WallTime: 20} clock := hlc.NewClock(timeutil.NewManualTime(timeutil.Unix(0, 1)), time.Nanosecond /* maxOffset */) txnSender := MakeMockTxnSenderFactoryWithNonTxnSender(nil /* senderFunc */, func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { require.NotNil(t, ba.BoundedStaleness) require.Equal(t, ts10, ba.BoundedStaleness.MinTimestampBound) @@ -571,7 +571,7 @@ func TestTxnNegotiateAndSend(t *testing.T) { db := NewDB(log.MakeTestingAmbientCtxWithNewTracer(), txnSender, clock, stopper) txn := NewTxn(ctx, db, 0 /* gatewayNodeID */) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.BoundedStaleness = &roachpb.BoundedStalenessHeader{ MinTimestampBound: ts10, } @@ -666,7 +666,7 @@ func TestTxnNegotiateAndSendWithDeadline(t *testing.T) { t.Run(test.name, func(t *testing.T) { clock := hlc.NewClock(timeutil.NewManualTime(timeutil.Unix(0, 1)), time.Nanosecond /* maxOffset */) txnSender := MakeMockTxnSenderFactoryWithNonTxnSender(nil /* senderFunc */, func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { require.NotNil(t, ba.BoundedStaleness) require.Equal(t, minTSBound, ba.BoundedStaleness.MinTimestampBound) @@ -681,7 +681,7 @@ func TestTxnNegotiateAndSendWithDeadline(t *testing.T) { txn := NewTxn(ctx, db, 0 /* gatewayNodeID */) require.NoError(t, txn.UpdateDeadline(ctx, test.txnDeadline)) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.BoundedStaleness = &roachpb.BoundedStalenessHeader{ MinTimestampBound: minTSBound, MaxTimestampBound: test.maxTSBound, @@ -723,7 +723,7 @@ func TestTxnNegotiateAndSendWithResumeSpan(t *testing.T) { ts20 := hlc.Timestamp{WallTime: 20} clock := hlc.NewClock(timeutil.NewManualTime(timeutil.Unix(0, 1)), time.Nanosecond /* maxOffset */) txnSender := MakeMockTxnSenderFactoryWithNonTxnSender(nil /* senderFunc */, func( - _ context.Context, ba roachpb.BatchRequest, + _ context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { require.NotNil(t, ba.BoundedStaleness) require.Equal(t, ts10, ba.BoundedStaleness.MinTimestampBound) @@ -751,7 +751,7 @@ func TestTxnNegotiateAndSendWithResumeSpan(t *testing.T) { db := 
NewDB(log.MakeTestingAmbientCtxWithNewTracer(), txnSender, clock, stopper) txn := NewTxn(ctx, db, 0 /* gatewayNodeID */) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.BoundedStaleness = &roachpb.BoundedStalenessHeader{ MinTimestampBound: ts10, } diff --git a/pkg/roachpb/api.go b/pkg/roachpb/api.go index f32d69394125..a8fe3074064e 100644 --- a/pkg/roachpb/api.go +++ b/pkg/roachpb/api.go @@ -57,7 +57,7 @@ const ( // SupportsBatch determines whether the methods in the provided batch // are supported by the ReadConsistencyType, returning an error if not. -func (rc ReadConsistencyType) SupportsBatch(ba BatchRequest) error { +func (rc ReadConsistencyType) SupportsBatch(ba *BatchRequest) error { switch rc { case CONSISTENT: return nil diff --git a/pkg/roachpb/api_test.go b/pkg/roachpb/api_test.go index 3dbbfa4f78dc..36ce4b0fd15b 100644 --- a/pkg/roachpb/api_test.go +++ b/pkg/roachpb/api_test.go @@ -13,7 +13,6 @@ package roachpb import ( "reflect" "testing" - "unsafe" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock" "github.com/cockroachdb/cockroach/pkg/storage/enginepb" @@ -407,11 +406,3 @@ func TestFlagCombinations(t *testing.T) { } } } - -// TestBatchRequestSize asserts that the size of BatchRequest remains below 256 -// bytes. In #86541, we found that once the size reaches or exceeds this size, -// end-to-end benchmarks observe a large (~10%) performance regression. -func TestBatchRequestSize(t *testing.T) { - size := int(unsafe.Sizeof(BatchRequest{})) - require.Less(t, size, 256) -} diff --git a/pkg/roachpb/batch.go b/pkg/roachpb/batch.go index 98f1e8c3f848..ea22bdd30a30 100644 --- a/pkg/roachpb/batch.go +++ b/pkg/roachpb/batch.go @@ -50,6 +50,12 @@ func (h Header) RequiredFrontier() hlc.Timestamp { return h.Timestamp } +// ShallowCopy returns a shallow copy of the receiver. +func (ba *BatchRequest) ShallowCopy() *BatchRequest { + shallowCopy := *ba + return &shallowCopy +} + // SetActiveTimestamp sets the correct timestamp at which the request is to be // carried out. For transactional requests, ba.Timestamp must be zero initially // and it will be set to txn.ReadTimestamp (note though this mostly impacts diff --git a/pkg/rpc/context.go b/pkg/rpc/context.go index 04d0886a20f9..41c247d914db 100644 --- a/pkg/rpc/context.go +++ b/pkg/rpc/context.go @@ -720,6 +720,7 @@ func makeInternalClientAdapter( clientStreamInterceptors: clientStreamInterceptors, serverStreamInterceptors: serverStreamInterceptors, batchHandler: func(ctx context.Context, ba *roachpb.BatchRequest, opts ...grpc.CallOption) (*roachpb.BatchResponse, error) { + ba = ba.ShallowCopy() // Mark this as originating locally, which is useful for the decision about // memory allocation tracking. 
ba.AdmissionHeader.SourceLocation = roachpb.AdmissionHeader_LOCAL diff --git a/pkg/server/admin.go b/pkg/server/admin.go index 8836325d168e..7e1c465b88d5 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -3118,7 +3118,7 @@ func (s *adminServer) SendKVBatch( } sp.finish(br, redact) }() - br, pErr := s.server.db.NonTransactionalSender().Send(ctx, *ba) + br, pErr := s.server.db.NonTransactionalSender().Send(ctx, ba) if br == nil { br = &roachpb.BatchResponse{} } diff --git a/pkg/server/node.go b/pkg/server/node.go index b9ce42fc7139..895f727a5d12 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -1112,7 +1112,7 @@ func (n *Node) batchInternal( writeBytes.Release() }() var pErr *roachpb.Error - br, writeBytes, pErr = n.stores.SendWithWriteBytes(ctx, *args) + br, writeBytes, pErr = n.stores.SendWithWriteBytes(ctx, args) if pErr != nil { br = &roachpb.BatchResponse{} log.VErrEventf(ctx, 3, "error from stores.Send: %s", pErr) diff --git a/pkg/server/systemconfigwatcher/systemconfigwatchertest/test_system_config_watcher.go b/pkg/server/systemconfigwatcher/systemconfigwatchertest/test_system_config_watcher.go index 3cd568ab8fd6..c5306620a6dd 100644 --- a/pkg/server/systemconfigwatcher/systemconfigwatchertest/test_system_config_watcher.go +++ b/pkg/server/systemconfigwatcher/systemconfigwatchertest/test_system_config_watcher.go @@ -131,7 +131,7 @@ func getSystemDescriptorAndZonesSpans( ctx context.Context, t *testing.T, codec keys.SQLCodec, kvDB *kv.DB, ) []roachpb.KeyValue { scanSpanForRows := func(startKey, endKey roachpb.Key) (rows []roachpb.KeyValue) { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Add( roachpb.NewScan( append(codec.TenantPrefix(), startKey...), diff --git a/pkg/sql/ambiguous_commit_test.go b/pkg/sql/ambiguous_commit_test.go index 5fe4aa74a029..4cfcfd7d5b40 100644 --- a/pkg/sql/ambiguous_commit_test.go +++ b/pkg/sql/ambiguous_commit_test.go @@ -39,11 +39,11 @@ import ( type interceptingTransport struct { kvcoord.Transport - sendNext func(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, error) + sendNext func(context.Context, *roachpb.BatchRequest) (*roachpb.BatchResponse, error) } func (t *interceptingTransport) SendNext( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { if fn := t.sendNext; fn != nil { return fn(ctx, ba) @@ -100,7 +100,7 @@ func TestAmbiguousCommit(t *testing.T) { transport, err := kvcoord.GRPCTransportFactory(opts, nodeDialer, replicas) return &interceptingTransport{ Transport: transport, - sendNext: func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + sendNext: func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { if ambiguousSuccess { br, err := transport.SendNext(ctx, ba) // During shutdown, we may get responses that @@ -134,7 +134,7 @@ func TestAmbiguousCommit(t *testing.T) { if ambiguousSuccess { params.Knobs.Store = &kvserver.StoreTestingKnobs{ TestingResponseFilter: func( - ctx context.Context, args roachpb.BatchRequest, _ *roachpb.BatchResponse, + ctx context.Context, args *roachpb.BatchRequest, _ *roachpb.BatchResponse, ) *roachpb.Error { if req, ok := args.GetArg(roachpb.ConditionalPut); ok { return maybeRPCError(req.(*roachpb.ConditionalPutRequest)) diff --git a/pkg/sql/backfill/mvcc_index_merger.go b/pkg/sql/backfill/mvcc_index_merger.go index 0e8c8bb72c30..e3758ebf2707 100644 --- a/pkg/sql/backfill/mvcc_index_merger.go +++ 
b/pkg/sql/backfill/mvcc_index_merger.go @@ -271,7 +271,7 @@ func (ibm *IndexBackfillMerger) scan( } // For now just grab all of the destination KVs and merge the corresponding entries. log.VInfof(ctx, 2, "scanning batch [%s, %s) at %v to merge", startKey, endKey, readAsOf) - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.TargetBytes = chunkBytes if err := ibm.growBoundAccount(ctx, chunkBytes); err != nil { return errors.Wrap(err, "failed to fetch keys to merge from temp index") diff --git a/pkg/sql/catalog/descs/collection_test.go b/pkg/sql/catalog/descs/collection_test.go index b93f35f08861..5be07fb1f607 100644 --- a/pkg/sql/catalog/descs/collection_test.go +++ b/pkg/sql/catalog/descs/collection_test.go @@ -155,7 +155,7 @@ func TestTxnClearsCollectionOnRetry(t *testing.T) { var serverArgs base.TestServerArgs params := base.TestClusterArgs{ServerArgs: serverArgs} params.ServerArgs.Knobs.Store = &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, r roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, r *roachpb.BatchRequest) *roachpb.Error { if r.Txn == nil || r.Txn.Name != txnName { return nil } diff --git a/pkg/sql/catalog/lease/lease_test.go b/pkg/sql/catalog/lease/lease_test.go index e6f7be31d358..c7f62775b329 100644 --- a/pkg/sql/catalog/lease/lease_test.go +++ b/pkg/sql/catalog/lease/lease_test.go @@ -2744,7 +2744,7 @@ func TestOfflineLeaseRefresh(t *testing.T) { var mu syncutil.RWMutex knobs := &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, req roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, req *roachpb.BatchRequest) *roachpb.Error { mu.RLock() checkRequest := req.Txn != nil && req.Txn.ID.Equal(txnID) mu.RUnlock() @@ -2874,7 +2874,7 @@ func TestLeaseTxnDeadlineExtension(t *testing.T) { // require the lease to be reacquired. lease.LeaseDuration.Override(ctx, ¶ms.SV, 0) params.Knobs.Store = &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, req roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, req *roachpb.BatchRequest) *roachpb.Error { filterMu.Lock() // Wait for a commit with the txnID, and only allows // it to resume when the channel gets unblocked. 
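The behavioral core of this change is the new BatchRequest.ShallowCopy helper added in pkg/roachpb/batch.go above, together with its uses in store_send.go and rpc/context.go: once batches travel by pointer, any layer that wants to adjust a header field before forwarding the request must copy the struct first, or it would mutate the caller's batch behind its back. The following is a minimal sketch of that convention under the pointer-based kv.Sender interface from this diff; forwardingSender and its fixed timestamp are illustrative names, not code from the PR.

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
)

// forwardingSender overrides the batch timestamp before delegating to the
// wrapped sender, without touching the caller's copy of the batch.
type forwardingSender struct {
	wrapped kv.Sender
	fixedTS hlc.Timestamp
}

func (s *forwardingSender) Send(
	ctx context.Context, ba *roachpb.BatchRequest,
) (*roachpb.BatchResponse, *roachpb.Error) {
	// The caller may still hold ba (for example to retry it), so mutate a
	// shallow copy instead of the original.
	ba = ba.ShallowCopy()
	ba.Timestamp = s.fixedTS
	return s.wrapped.Send(ctx, ba)
}

As in the store_send.go and rpc/context.go hunks, a shallow copy suffices here because only top-level header fields are reassigned; the shared Requests slice is left untouched.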
@@ -3194,7 +3194,7 @@ func TestAmbiguousResultIsRetried(t *testing.T) { type filter = kvserverbase.ReplicaResponseFilter var f atomic.Value - noop := filter(func(context.Context, roachpb.BatchRequest, *roachpb.BatchResponse) *roachpb.Error { + noop := filter(func(context.Context, *roachpb.BatchRequest, *roachpb.BatchResponse) *roachpb.Error { return nil }) f.Store(noop) @@ -3202,7 +3202,7 @@ func TestAmbiguousResultIsRetried(t *testing.T) { s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingResponseFilter: func(ctx context.Context, request roachpb.BatchRequest, response *roachpb.BatchResponse) *roachpb.Error { + TestingResponseFilter: func(ctx context.Context, request *roachpb.BatchRequest, response *roachpb.BatchResponse) *roachpb.Error { return f.Load().(filter)(ctx, request, response) }, }, @@ -3222,7 +3222,7 @@ func TestAmbiguousResultIsRetried(t *testing.T) { testCtx, cancel := context.WithCancel(ctx) defer cancel() errorsAfterEndTxn := make(chan chan *roachpb.Error) - f.Store(filter(func(ctx context.Context, request roachpb.BatchRequest, response *roachpb.BatchResponse) *roachpb.Error { + f.Store(filter(func(ctx context.Context, request *roachpb.BatchRequest, response *roachpb.BatchResponse) *roachpb.Error { switch r := request.Requests[0].GetInner().(type) { case *roachpb.ConditionalPutRequest: if !bytes.HasPrefix(r.Key, indexPrefix) { diff --git a/pkg/sql/conn_executor_internal_test.go b/pkg/sql/conn_executor_internal_test.go index a569e3d85a16..635ebd3f79bb 100644 --- a/pkg/sql/conn_executor_internal_test.go +++ b/pkg/sql/conn_executor_internal_test.go @@ -265,7 +265,7 @@ func startConnExecutor( stopper := stop.NewStopper() clock := hlc.NewClockWithSystemTimeSource(0 /* maxOffset */) factory := kv.MakeMockTxnSenderFactory( - func(context.Context, *roachpb.Transaction, roachpb.BatchRequest, + func(context.Context, *roachpb.Transaction, *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return nil, nil }) diff --git a/pkg/sql/conn_executor_test.go b/pkg/sql/conn_executor_test.go index 84e708c35678..8639bba15c9e 100644 --- a/pkg/sql/conn_executor_test.go +++ b/pkg/sql/conn_executor_test.go @@ -584,7 +584,7 @@ func TestQueryProgress(t *testing.T) { TableReaderBatchBytesLimit: 1500, }, Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(_ context.Context, req roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, req *roachpb.BatchRequest) *roachpb.Error { if req.IsSingleRequest() { scan, ok := req.Requests[0].GetInner().(*roachpb.ScanRequest) if ok && getTableSpan().ContainsKey(scan.Key) && atomic.LoadInt64(&queryRunningAtomic) == 1 { @@ -795,7 +795,7 @@ func TestRetriableErrorDuringUpgradedTransaction(t *testing.T) { testDB.QueryRow(t, "SELECT 'foo'::regclass::oid").Scan(&fooTableId) // Inject an error that will happen during execution. - filter.setFilter(func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + filter.setFilter(func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if ba.Txn == nil { return nil } @@ -872,7 +872,7 @@ func TestErrorDuringPrepareInExplicitTransactionPropagates(t *testing.T) { require.NoError(t, err) // Inject an error that will happen during planning. 
- filter.setFilter(func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + filter.setFilter(func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if ba.Txn == nil { return nil } @@ -1131,7 +1131,7 @@ func TestTransactionDeadline(t *testing.T) { // This will be used in the tests for accessing mu. locked := func(f func()) { mu.Lock(); defer mu.Unlock(); f() } // Set up a kvserverbase.ReplicaRequestFilter which will extract the deadline for the test transaction. - checkTransactionDeadlineFilter := func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + checkTransactionDeadlineFilter := func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if ba.Txn == nil { return nil } @@ -1800,13 +1800,13 @@ func (f *dynamicRequestFilter) setFilter(filter kvserverbase.ReplicaRequestFilte // noopRequestFilter is a kvserverbase.ReplicaRequestFilter. func (f *dynamicRequestFilter) filter( - ctx context.Context, request roachpb.BatchRequest, + ctx context.Context, request *roachpb.BatchRequest, ) *roachpb.Error { return f.v.Load().(kvserverbase.ReplicaRequestFilter)(ctx, request) } // noopRequestFilter is a kvserverbase.ReplicaRequestFilter that does nothing. -func noopRequestFilter(ctx context.Context, request roachpb.BatchRequest) *roachpb.Error { +func noopRequestFilter(ctx context.Context, request *roachpb.BatchRequest) *roachpb.Error { return nil } diff --git a/pkg/sql/crdb_internal_test.go b/pkg/sql/crdb_internal_test.go index caddf3ca135b..341b4e4aa3fd 100644 --- a/pkg/sql/crdb_internal_test.go +++ b/pkg/sql/crdb_internal_test.go @@ -527,7 +527,7 @@ func TestDistSQLFlowsVirtualTables(t *testing.T) { params := base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(_ context.Context, req roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, req *roachpb.BatchRequest) *roachpb.Error { if atomic.LoadInt64(&stallAtomic) == 1 { if req.IsSingleRequest() { scan, ok := req.Requests[0].GetInner().(*roachpb.ScanRequest) diff --git a/pkg/sql/drop_test.go b/pkg/sql/drop_test.go index 06986d383055..78b920d5da64 100644 --- a/pkg/sql/drop_test.go +++ b/pkg/sql/drop_test.go @@ -1091,7 +1091,7 @@ WHERE defer filterState.Unlock() return filterState.txnID } - rf.setFilter(func(ctx context.Context, request roachpb.BatchRequest) *roachpb.Error { + rf.setFilter(func(ctx context.Context, request *roachpb.BatchRequest) *roachpb.Error { if request.Txn == nil || request.Txn.Name != sql.SQLTxnName { return nil } @@ -1130,7 +1130,7 @@ WHERE // fail. We'll want to ensure that we get a retriable error. Use the below // pattern to detect when the user transaction has finished planning and is // now executing: we don't want to inject the error during planning. 
- rf.setFilter(func(ctx context.Context, request roachpb.BatchRequest) *roachpb.Error { + rf.setFilter(func(ctx context.Context, request *roachpb.BatchRequest) *roachpb.Error { if request.Txn == nil { return nil } diff --git a/pkg/sql/gcjob_test/gc_job_test.go b/pkg/sql/gcjob_test/gc_job_test.go index 11bc1392963b..e63d2f788f49 100644 --- a/pkg/sql/gcjob_test/gc_job_test.go +++ b/pkg/sql/gcjob_test/gc_job_test.go @@ -278,7 +278,7 @@ func TestGCJobRetry(t *testing.T) { params := base.TestServerArgs{Settings: cs} params.Knobs.JobsTestingKnobs = jobs.NewTestingKnobsWithShortIntervals() params.Knobs.Store = &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, request roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, request *roachpb.BatchRequest) *roachpb.Error { r, ok := request.GetArg(roachpb.DeleteRange) if !ok || !r.(*roachpb.DeleteRangeRequest).UseRangeTombstone { return nil @@ -551,7 +551,7 @@ func TestDropIndexWithDroppedDescriptor(t *testing.T) { if !beforeDelRange { knobs.Store = &kvserver.StoreTestingKnobs{ TestingRequestFilter: func( - ctx context.Context, request roachpb.BatchRequest, + ctx context.Context, request *roachpb.BatchRequest, ) *roachpb.Error { req, ok := request.GetArg(roachpb.DeleteRange) if !ok { diff --git a/pkg/sql/importer/exportcsv_test.go b/pkg/sql/importer/exportcsv_test.go index a999876e269c..7cd5d7e890e9 100644 --- a/pkg/sql/importer/exportcsv_test.go +++ b/pkg/sql/importer/exportcsv_test.go @@ -639,7 +639,7 @@ func TestProcessorEncountersUncertaintyError(t *testing.T) { Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if atomic.LoadInt64(&trapRead) == 0 { return nil } diff --git a/pkg/sql/importer/import_stmt_test.go b/pkg/sql/importer/import_stmt_test.go index bf0a85386136..67a93930a76d 100644 --- a/pkg/sql/importer/import_stmt_test.go +++ b/pkg/sql/importer/import_stmt_test.go @@ -7113,7 +7113,7 @@ func TestUDTChangeDuringImport(t *testing.T) { JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), Store: &kvserver.StoreTestingKnobs{ TestingResponseFilter: jobutils.BulkOpResponseFilter(&allowResponse), - TestingRequestFilter: func(ctx context.Context, br roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, br *roachpb.BatchRequest) *roachpb.Error { for _, ru := range br.Requests { switch ru.GetInner().(type) { case *roachpb.AddSSTableRequest: diff --git a/pkg/sql/insert_fast_path.go b/pkg/sql/insert_fast_path.go index 8853b6b629e5..91c675ea5bf9 100644 --- a/pkg/sql/insert_fast_path.go +++ b/pkg/sql/insert_fast_path.go @@ -209,7 +209,8 @@ func (n *insertFastPathNode) runFKChecks(params runParams) error { defer n.run.fkBatch.Reset() // Run the FK checks batch. 
- br, err := params.p.txn.Send(params.ctx, n.run.fkBatch) + ba := n.run.fkBatch.ShallowCopy() + br, err := params.p.txn.Send(params.ctx, ba) if err != nil { return err.GoError() } diff --git a/pkg/sql/row/fetcher.go b/pkg/sql/row/fetcher.go index 68b6b6387362..c14dcfc81e04 100644 --- a/pkg/sql/row/fetcher.go +++ b/pkg/sql/row/fetcher.go @@ -538,7 +538,7 @@ func (rf *Fetcher) StartInconsistentScan( log.Infof(ctx, "starting inconsistent scan at timestamp %v", txnTimestamp) } - sendFn := func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, error) { + sendFn := func(ctx context.Context, ba *roachpb.BatchRequest) (*roachpb.BatchResponse, error) { if now := timeutil.Now(); now.Sub(txnTimestamp.GoTime()) >= maxTimestampAge { // Time to bump the transaction. First commit the old one (should be a no-op). if err := txn.Commit(ctx); err != nil { diff --git a/pkg/sql/row/kv_batch_fetcher.go b/pkg/sql/row/kv_batch_fetcher.go index 71b952b87739..69f354c6ff3e 100644 --- a/pkg/sql/row/kv_batch_fetcher.go +++ b/pkg/sql/row/kv_batch_fetcher.go @@ -54,7 +54,7 @@ var defaultKVBatchSize = rowinfra.KeyLimit(util.ConstantWithMetamorphicTestValue // sendFunc is the function used to execute a KV batch; normally // wraps (*client.Txn).Send. type sendFunc func( - ctx context.Context, ba roachpb.BatchRequest, + ctx context.Context, ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) // identifiableSpans is a helper for keeping track of the roachpb.Spans with the @@ -228,7 +228,7 @@ func (f *txnKVFetcher) getBatchKeyLimitForIdx(batchIdx int) rowinfra.KeyLimit { func makeKVBatchFetcherDefaultSendFunc(txn *kv.Txn, batchRequestsIssued *int64) sendFunc { return func( ctx context.Context, - ba roachpb.BatchRequest, + ba *roachpb.BatchRequest, ) (*roachpb.BatchResponse, error) { res, err := txn.Send(ctx, ba) if err != nil { @@ -394,7 +394,7 @@ func (f *txnKVFetcher) SetupNextFetch( // fetch retrieves spans from the kv layer. func (f *txnKVFetcher) fetch(ctx context.Context) error { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.Header.WaitPolicy = f.lockWaitPolicy ba.Header.LockTimeout = f.lockTimeout ba.Header.TargetBytes = int64(f.batchBytesLimit) diff --git a/pkg/sql/row/kv_fetcher.go b/pkg/sql/row/kv_fetcher.go index 523af707c77c..e96ff3d184ae 100644 --- a/pkg/sql/row/kv_fetcher.go +++ b/pkg/sql/row/kv_fetcher.go @@ -80,7 +80,7 @@ func NewKVFetcher( sendFn = makeKVBatchFetcherDefaultSendFunc(txn, &batchRequestsIssued) } else { negotiated := false - sendFn = func(ctx context.Context, ba roachpb.BatchRequest) (br *roachpb.BatchResponse, _ error) { + sendFn = func(ctx context.Context, ba *roachpb.BatchRequest) (br *roachpb.BatchResponse, _ error) { ba.RoutingPolicy = roachpb.RoutingPolicy_NEAREST var pErr *roachpb.Error // Only use NegotiateAndSend if we have not yet negotiated a timestamp. 
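The fetcher and insert fast-path hunks above show the caller side of the conversion: batches are allocated once as pointers (ba := &roachpb.BatchRequest{}), and a cached batch such as fkBatch is shallow-copied before being re-sent. A small sketch of how a caller builds and issues a batch under the new signature; fetchDescriptor, the key limit, and the key arguments are made up for illustration, not taken from the diff.

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// fetchDescriptor issues a single bounded Scan through the transaction,
// building the batch as a pointer up front as the converted callers do.
func fetchDescriptor(
	ctx context.Context, txn *kv.Txn, key, endKey roachpb.Key,
) (*roachpb.BatchResponse, error) {
	ba := &roachpb.BatchRequest{}
	ba.Header.MaxSpanRequestKeys = 1 // assumed limit, for illustration only
	ba.Add(&roachpb.ScanRequest{
		RequestHeader: roachpb.RequestHeader{Key: key, EndKey: endKey},
	})
	// After Send the batch belongs to the sender chain and should not be
	// mutated again by the caller.
	br, pErr := txn.Send(ctx, ba)
	if pErr != nil {
		return nil, pErr.GoError()
	}
	return br, nil
}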
diff --git a/pkg/sql/rowexec/processors_test.go b/pkg/sql/rowexec/processors_test.go index 5b8a1b4c404b..883dde2419a9 100644 --- a/pkg/sql/rowexec/processors_test.go +++ b/pkg/sql/rowexec/processors_test.go @@ -459,7 +459,7 @@ func TestDrainingProcessorSwallowsUncertaintyError(t *testing.T) { 0: { Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if atomic.LoadInt64(&trapRead) == 0 { return nil } @@ -634,7 +634,7 @@ func TestUncertaintyErrorIsReturned(t *testing.T) { testClusterArgs.ServerArgsPerNode[node] = base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if atomic.LoadInt64(&trapRead) == 0 { return nil } diff --git a/pkg/sql/run_control_test.go b/pkg/sql/run_control_test.go index 564cc2c90784..0b9c809c083b 100644 --- a/pkg/sql/run_control_test.go +++ b/pkg/sql/run_control_test.go @@ -868,8 +868,8 @@ func TestTenantStatementTimeoutAdmissionQueueCancelation(t *testing.T) { TestingDisableSkipEnforcement: true, }, Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, req roachpb.BatchRequest) *roachpb.Error { - if matchBatch(ctx, &req) { + TestingRequestFilter: func(ctx context.Context, req *roachpb.BatchRequest) *roachpb.Error { + if matchBatch(ctx, req) { // Notify we're blocking. unblockClientCh <- struct{}{} <-qBlockersCh diff --git a/pkg/sql/sem/builtins/generator_builtins.go b/pkg/sql/sem/builtins/generator_builtins.go index af5509acd9f1..f1311cb8a8e1 100644 --- a/pkg/sql/sem/builtins/generator_builtins.go +++ b/pkg/sql/sem/builtins/generator_builtins.go @@ -2139,7 +2139,7 @@ func (sp *spanKeyIterator) Next(ctx context.Context) (bool, error) { func (sp *spanKeyIterator) scan( ctx context.Context, startKey roachpb.Key, endKey roachpb.Key, ) error { - var ba roachpb.BatchRequest + ba := &roachpb.BatchRequest{} ba.TargetBytes = spanKeyIteratorChunkBytes ba.MaxSpanRequestKeys = spanKeyIteratorChunkKeys ba.Add(&roachpb.ScanRequest{ diff --git a/pkg/sql/sem/eval/timeconv_test.go b/pkg/sql/sem/eval/timeconv_test.go index 341ee5cc0c84..7d57271e8578 100644 --- a/pkg/sql/sem/eval/timeconv_test.go +++ b/pkg/sql/sem/eval/timeconv_test.go @@ -48,7 +48,7 @@ func TestClusterTimestampConversion(t *testing.T) { clock := hlc.NewClockWithSystemTimeSource(time.Nanosecond /* maxOffset */) senderFactory := kv.MakeMockTxnSenderFactory( - func(context.Context, *roachpb.Transaction, roachpb.BatchRequest, + func(context.Context, *roachpb.Transaction, *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { panic("unused") }) diff --git a/pkg/sql/sqlliveness/slstorage/slstorage_test.go b/pkg/sql/sqlliveness/slstorage/slstorage_test.go index a14716639e4a..c25dc638847c 100644 --- a/pkg/sql/sqlliveness/slstorage/slstorage_test.go +++ b/pkg/sql/sqlliveness/slstorage/slstorage_test.go @@ -456,15 +456,13 @@ func TestConcurrentAccessSynchronization(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - type filterFunc = func(ctx context.Context, request roachpb.BatchRequest) *roachpb.Error + type filterFunc = func(ctx context.Context, request *roachpb.BatchRequest) *roachpb.Error var requestFilter atomic.Value requestFilter.Store(filterFunc(nil)) s, sqlDB, 
kvDB := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func( - ctx context.Context, request roachpb.BatchRequest, - ) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, request *roachpb.BatchRequest) *roachpb.Error { if f := requestFilter.Load().(filterFunc); f != nil { return f(ctx, request) } @@ -511,7 +509,7 @@ func TestConcurrentAccessSynchronization(t *testing.T) { }) } unblock := func() { close(blockChannel.Load().(chan struct{})) } - requestFilter.Store(func(ctx context.Context, request roachpb.BatchRequest) *roachpb.Error { + requestFilter.Store(func(ctx context.Context, request *roachpb.BatchRequest) *roachpb.Error { getRequest, ok := request.GetArg(roachpb.Get) if !ok { return nil @@ -654,14 +652,14 @@ func TestDeleteMidUpdateFails(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - type filterFunc = func(context.Context, roachpb.BatchRequest, *roachpb.BatchResponse) *roachpb.Error + type filterFunc = func(context.Context, *roachpb.BatchRequest, *roachpb.BatchResponse) *roachpb.Error var respFilter atomic.Value respFilter.Store(filterFunc(nil)) s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ TestingResponseFilter: func( - ctx context.Context, request roachpb.BatchRequest, resp *roachpb.BatchResponse, + ctx context.Context, request *roachpb.BatchRequest, resp *roachpb.BatchResponse, ) *roachpb.Error { if f := respFilter.Load().(filterFunc); f != nil { return f(ctx, request, resp) @@ -697,7 +695,7 @@ func TestDeleteMidUpdateFails(t *testing.T) { // to perform an update after the get has evaluated. getChan := make(chan chan struct{}) respFilter.Store(func( - ctx context.Context, request roachpb.BatchRequest, _ *roachpb.BatchResponse, + ctx context.Context, request *roachpb.BatchRequest, _ *roachpb.BatchResponse, ) *roachpb.Error { if get, ok := request.GetArg(roachpb.Get); !ok || !bytes.HasPrefix( get.(*roachpb.GetRequest).Key, diff --git a/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go b/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go index 4ca10d2faadd..a4c0ef6a137e 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go +++ b/pkg/sql/sqlstats/persistedsqlstats/compaction_test.go @@ -475,7 +475,7 @@ func (k *kvScanInterceptor) disable() { atomic.StoreInt32(&k.enabled, 0) } -func (k *kvScanInterceptor) intercept(_ context.Context, ba roachpb.BatchRequest) *roachpb.Error { +func (k *kvScanInterceptor) intercept(_ context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if atomic.LoadInt32(&k.enabled) == 0 { return nil } diff --git a/pkg/sql/stats/create_stats_job_test.go b/pkg/sql/stats/create_stats_job_test.go index 06602b3c6f07..e64bfe88c57c 100644 --- a/pkg/sql/stats/create_stats_job_test.go +++ b/pkg/sql/stats/create_stats_job_test.go @@ -487,7 +487,7 @@ func createStatsRequestFilter( ) (kvserverbase.ReplicaRequestFilter, func(descpb.ID)) { var tableToBlock atomic.Value tableToBlock.Store(descpb.InvalidID) - return func(ctx context.Context, ba roachpb.BatchRequest) *roachpb.Error { + return func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error { if req, ok := ba.GetArg(roachpb.Scan); ok { _, tableID, _ := encoding.DecodeUvarintAscending(req.(*roachpb.ScanRequest).Key) // Ensure that the tableID is what we expect it to be. 
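Most of the remaining test hunks update kvserverbase.ReplicaRequestFilter and ReplicaResponseFilter implementations to the pointer signature. For reference, a request filter written against the new signature looks roughly like the sketch below; blockNamedTxnScans and the injected error message are hypothetical, not part of the diff.

package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// blockNamedTxnScans returns store testing knobs whose request filter
// rejects Scan requests issued by a transaction with the given name,
// in the style of the filters converted above.
func blockNamedTxnScans(txnName string) *kvserver.StoreTestingKnobs {
	return &kvserver.StoreTestingKnobs{
		TestingRequestFilter: func(ctx context.Context, ba *roachpb.BatchRequest) *roachpb.Error {
			if ba.Txn == nil || ba.Txn.Name != txnName {
				return nil
			}
			if _, ok := ba.GetArg(roachpb.Scan); ok {
				return roachpb.NewErrorf("injected failure for txn %q", txnName)
			}
			return nil
		},
	}
}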
diff --git a/pkg/sql/stmtdiagnostics/statement_diagnostics_test.go b/pkg/sql/stmtdiagnostics/statement_diagnostics_test.go index 81d71d5d9a15..6169ebc330d9 100644 --- a/pkg/sql/stmtdiagnostics/statement_diagnostics_test.go +++ b/pkg/sql/stmtdiagnostics/statement_diagnostics_test.go @@ -583,7 +583,7 @@ func TestChangePollInterval(t *testing.T) { Settings: settings, Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, request roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, request *roachpb.BatchRequest) *roachpb.Error { if request.Txn == nil { return nil } diff --git a/pkg/sql/txn_restart_test.go b/pkg/sql/txn_restart_test.go index 96f87a153df3..1f54f890e598 100644 --- a/pkg/sql/txn_restart_test.go +++ b/pkg/sql/txn_restart_test.go @@ -1157,7 +1157,7 @@ func TestReacquireLeaseOnRestart(t *testing.T) { var s serverutils.TestServerInterface var clockUpdate, restartDone int32 testingResponseFilter := func( - ctx context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse, + ctx context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse, ) *roachpb.Error { for _, ru := range ba.Requests { if req := ru.GetGet(); req != nil { diff --git a/pkg/sql/txn_state_test.go b/pkg/sql/txn_state_test.go index 20c0739f75f7..464dfee7369b 100644 --- a/pkg/sql/txn_state_test.go +++ b/pkg/sql/txn_state_test.go @@ -53,7 +53,7 @@ type testContext struct { func makeTestContext(stopper *stop.Stopper) testContext { clock := hlc.NewClock(timeutil.NewManualTime(timeutil.Unix(0, 123)), time.Nanosecond /* maxOffset */) factory := kv.MakeMockTxnSenderFactory( - func(context.Context, *roachpb.Transaction, roachpb.BatchRequest, + func(context.Context, *roachpb.Transaction, *roachpb.BatchRequest, ) (*roachpb.BatchResponse, *roachpb.Error) { return nil, nil }) diff --git a/pkg/sql/user_test.go b/pkg/sql/user_test.go index 89c654478682..096d32bcf12b 100644 --- a/pkg/sql/user_test.go +++ b/pkg/sql/user_test.go @@ -59,7 +59,7 @@ func TestGetUserTimeout(t *testing.T) { close(closedCh) unavailableCh.Store(closedCh) knobs := &kvserver.StoreTestingKnobs{ - TestingRequestFilter: func(ctx context.Context, _ roachpb.BatchRequest) *roachpb.Error { + TestingRequestFilter: func(ctx context.Context, _ *roachpb.BatchRequest) *roachpb.Error { select { case <-unavailableCh.Load().(chan struct{}): case <-ctx.Done(): diff --git a/pkg/testutils/jobutils/jobs_verification.go b/pkg/testutils/jobutils/jobs_verification.go index e0b3334bd898..1872596d41c7 100644 --- a/pkg/testutils/jobutils/jobs_verification.go +++ b/pkg/testutils/jobutils/jobs_verification.go @@ -134,7 +134,7 @@ func RunJob( // related to bulk IO/backup/restore/import: Export, Import and AddSSTable. See // discussion on RunJob for where this might be useful. func BulkOpResponseFilter(allowProgressIota *chan struct{}) kvserverbase.ReplicaResponseFilter { - return func(_ context.Context, ba roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { + return func(_ context.Context, ba *roachpb.BatchRequest, br *roachpb.BatchResponse) *roachpb.Error { for _, ru := range br.Responses { switch ru.GetInner().(type) { case *roachpb.ExportResponse, *roachpb.AddSSTableResponse: diff --git a/pkg/testutils/kvclientutils/txn_recovery.go b/pkg/testutils/kvclientutils/txn_recovery.go index 8d465e1a64df..1b97035f103c 100644 --- a/pkg/testutils/kvclientutils/txn_recovery.go +++ b/pkg/testutils/kvclientutils/txn_recovery.go @@ -73,7 +73,7 @@ func CheckPushResult( // expire. 
Force: true, } - ba := roachpb.BatchRequest{} + ba := &roachpb.BatchRequest{} ba.Add(&pushReq) recCtx, collectRecAndFinish := tracing.ContextWithRecordingSpan(ctx, tr, "test trace")