// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package storage_test

import (
"bytes"
"context"
"fmt"
"math"
"math/rand"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/internal/client"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/rpc/nodedialer"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/engine"
"github.com/cockroachdb/cockroach/pkg/storage/engine/enginepb"
"github.com/cockroachdb/cockroach/pkg/storage/rditer"
"github.com/cockroachdb/cockroach/pkg/storage/stateloader"
"github.com/cockroachdb/cockroach/pkg/storage/storagebase"
"github.com/cockroachdb/cockroach/pkg/storage/txnwait"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/ctxgroup"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/randutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.etcd.io/etcd/raft"
"go.etcd.io/etcd/raft/raftpb"
)
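
// adminMergeArgs returns an AdminMergeRequest that targets the range
// containing the given key.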
func adminMergeArgs(key roachpb.Key) *roachpb.AdminMergeRequest {
return &roachpb.AdminMergeRequest{
RequestHeader: roachpb.RequestHeader{
Key: key,
},
}
}

// createSplitRanges issues an AdminSplit command for the key "b". It returns
// the descriptors for the ranges to the left and right of the split.
func createSplitRanges(
ctx context.Context, store *storage.Store,
) (*roachpb.RangeDescriptor, *roachpb.RangeDescriptor, error) {
args := adminSplitArgs(roachpb.Key("b"))
if _, err := client.SendWrapped(ctx, store.TestSender(), args); err != nil {
return nil, nil, err.GoError()
}
lhsDesc := store.LookupReplica(roachpb.RKey("a")).Desc()
rhsDesc := store.LookupReplica(roachpb.RKey("c")).Desc()
if bytes.Equal(lhsDesc.StartKey, rhsDesc.StartKey) {
return nil, nil, fmt.Errorf("split ranges have the same start key: %q = %q",
lhsDesc.StartKey, rhsDesc.StartKey)
}
return lhsDesc, rhsDesc, nil
}

// TestStoreRangeMergeTwoEmptyRanges tries to merge two empty ranges together.
func TestStoreRangeMergeTwoEmptyRanges(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableMergeQueue = true
mtc := &multiTestContext{storeConfig: &storeCfg}
mtc.Start(t, 1)
defer mtc.Stop()
store := mtc.Store(0)
lhsDesc, _, err := createSplitRanges(ctx, store)
if err != nil {
t.Fatal(err)
}
// Merge the RHS back into the LHS.
args := adminMergeArgs(lhsDesc.StartKey.AsRawKey())
_, pErr := client.SendWrapped(ctx, store.TestSender(), args)
if pErr != nil {
t.Fatal(pErr)
}
// Verify the merge by looking up keys from both ranges.
lhsRepl := store.LookupReplica(roachpb.RKey("a"))
rhsRepl := store.LookupReplica(roachpb.RKey("c"))
if !reflect.DeepEqual(lhsRepl, rhsRepl) {
t.Fatalf("ranges were not merged: %s != %s", lhsRepl, rhsRepl)
}
// The LHS has been split once and merged once, so it should have received
// two generation bumps.
if e, a := int64(2), lhsRepl.Desc().GetGeneration(); e != a {
t.Fatalf("expected LHS to have generation %d, but got %d", e, a)
}
}
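
// getEngineKeySet scans all of the provided engine's data and returns the set
// of keys it contains, represented as raw strings.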
func getEngineKeySet(t *testing.T, e engine.Engine) map[string]struct{} {
t.Helper()
kvs, err := engine.Scan(e, engine.NilKey, engine.MVCCKeyMax, 0 /* max */)
if err != nil {
t.Fatal(err)
}
out := map[string]struct{}{}
for _, kv := range kvs {
out[string(kv.Key.Key)] = struct{}{}
}
return out
}

// TestStoreRangeMergeMetadataCleanup tests that all metadata of a
// subsumed range is cleaned up on merge.
func TestStoreRangeMergeMetadataCleanup(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableMergeQueue = true
mtc := &multiTestContext{storeConfig: &storeCfg}
mtc.Start(t, 1)
defer mtc.Stop()
store := mtc.Store(0)
content := roachpb.Key("testing!")
// Write some values left of the proposed split key.
pArgs := putArgs(roachpb.Key("aaa"), content)
if _, pErr := client.SendWrapped(ctx, store.TestSender(), pArgs); pErr != nil {
t.Fatal(pErr)
}
// Collect all the keys.
preKeys := getEngineKeySet(t, store.Engine())
// Split the range.
lhsDesc, rhsDesc, err := createSplitRanges(ctx, store)
if err != nil {
t.Fatal(err)
}
// Write some values right of the split key.
pArgs = putArgs(roachpb.Key("ccc"), content)
if _, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{
RangeID: rhsDesc.RangeID,
}, pArgs); pErr != nil {
t.Fatal(pErr)
}
// Merge the b range back into the a range.
args := adminMergeArgs(lhsDesc.StartKey.AsRawKey())
if _, pErr := client.SendWrapped(ctx, store.TestSender(), args); pErr != nil {
t.Fatal(pErr)
}
// Collect all the keys again.
postKeys := getEngineKeySet(t, store.Engine())
// Compute the new keys.
for k := range preKeys {
delete(postKeys, k)
}
tombstoneKey := string(keys.RaftTombstoneKey(rhsDesc.RangeID))
if _, ok := postKeys[tombstoneKey]; !ok {
t.Errorf("tombstone key (%s) missing after merge", roachpb.Key(tombstoneKey))
}
delete(postKeys, tombstoneKey)
// Keep only the subsumed range's local keys.
localRangeKeyPrefix := string(keys.MakeRangeIDPrefix(rhsDesc.RangeID))
for k := range postKeys {
if !strings.HasPrefix(k, localRangeKeyPrefix) {
delete(postKeys, k)
}
}
if numKeys := len(postKeys); numKeys > 0 {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%d keys were not cleaned up:\n", numKeys)
for k := range postKeys {
fmt.Fprintf(&buf, "%s (%q)\n", roachpb.Key(k), k)
}
t.Fatal(buf.String())
}
}

// TestStoreRangeMergeWithData attempts to merge two ranges, each containing
// data.
func TestStoreRangeMergeWithData(t *testing.T) {
defer leaktest.AfterTest(t)()
for _, retries := range []int64{0, 3} {
t.Run(fmt.Sprintf("retries=%d", retries), func(t *testing.T) {
mergeWithData(t, retries)
})
}
}
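
// mergeWithData splits a range, writes data to both sides, merges the ranges
// back together, and verifies that the data is still readable and writable
// afterwards. If retries is nonzero, that many retryable errors are injected
// into the merge transaction's commit before it is allowed to succeed.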
func mergeWithData(t *testing.T, retries int64) {
ctx := context.Background()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableReplicateQueue = true
storeCfg.TestingKnobs.DisableMergeQueue = true
// Maybe inject some retryable errors when the merge transaction commits.
var mtc *multiTestContext
storeCfg.TestingKnobs.TestingRequestFilter = func(ba roachpb.BatchRequest) *roachpb.Error {
for _, req := range ba.Requests {
if et := req.GetEndTransaction(); et != nil && et.InternalCommitTrigger.GetMergeTrigger() != nil {
if atomic.AddInt64(&retries, -1) >= 0 {
return roachpb.NewError(
roachpb.NewTransactionRetryError(roachpb.RETRY_SERIALIZABLE, "filter err"))
}
}
if req.GetSubsume() != nil {
// Introduce targeted chaos by forcing a lease acquisition before
// Subsume can execute. This triggers an unusual code path where the
// lease acquisition, not Subsume, notices the merge and installs a
// mergeComplete channel on the replica.
mtc.advanceClock(ctx)
}
}
return nil
}
mtc = &multiTestContext{
storeConfig: &storeCfg,
// This test was written before the multiTestContext started creating many
// system ranges at startup, and hasn't been updated to take that into
// account.
startWithSingleRange: true,
}
var store1, store2 *storage.Store
mtc.Start(t, 1)
store1, store2 = mtc.stores[0], mtc.stores[0]
defer mtc.Stop()
lhsDesc, rhsDesc, pErr := createSplitRanges(ctx, store1)
if pErr != nil {
t.Fatal(pErr)
}
content := []byte("testing!")
// Write some values left and right of the proposed split key.
pArgs := putArgs(roachpb.Key("aaa"), content)
if _, pErr := client.SendWrapped(ctx, store1.TestSender(), pArgs); pErr != nil {
t.Fatal(pErr)
}
pArgs = putArgs(roachpb.Key("ccc"), content)
if _, pErr := client.SendWrappedWith(ctx, store2.TestSender(), roachpb.Header{
RangeID: rhsDesc.RangeID,
}, pArgs); pErr != nil {
t.Fatal(pErr)
}
// Confirm the values are there.
gArgs := getArgs(roachpb.Key("aaa"))
if reply, pErr := client.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil {
t.Fatal(pErr)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
gArgs = getArgs(roachpb.Key("ccc"))
if reply, pErr := client.SendWrappedWith(ctx, store2.TestSender(), roachpb.Header{
RangeID: rhsDesc.RangeID,
}, gArgs); pErr != nil {
t.Fatal(pErr)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
// Merge the b range back into the a range.
args := adminMergeArgs(lhsDesc.StartKey.AsRawKey())
if _, pErr := client.SendWrapped(ctx, store1.TestSender(), args); pErr != nil {
t.Fatal(pErr)
}
// Verify that no intents remain on the range descriptor keys.
for _, key := range []roachpb.Key{keys.RangeDescriptorKey(lhsDesc.StartKey), keys.RangeDescriptorKey(rhsDesc.StartKey)} {
if _, _, err := engine.MVCCGet(
ctx, store1.Engine(), key, store1.Clock().Now(), engine.MVCCGetOptions{},
); err != nil {
t.Fatal(err)
}
}
// Verify the merge by looking up keys from both ranges.
lhsRepl := store1.LookupReplica(roachpb.RKey("a"))
rhsRepl := store1.LookupReplica(roachpb.RKey("c"))
if lhsRepl != rhsRepl {
t.Fatalf("ranges were not merged %+v=%+v", lhsRepl.Desc(), rhsRepl.Desc())
}
if startKey := lhsRepl.Desc().StartKey; !bytes.Equal(startKey, roachpb.RKeyMin) {
t.Fatalf("The start key is not equal to KeyMin %q=%q", startKey, roachpb.RKeyMin)
}
if endKey := rhsRepl.Desc().EndKey; !bytes.Equal(endKey, roachpb.RKeyMax) {
t.Fatalf("The end key is not equal to KeyMax %q=%q", endKey, roachpb.RKeyMax)
}
// Try to get values from after the merge.
gArgs = getArgs(roachpb.Key("aaa"))
if reply, pErr := client.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil {
t.Fatal(pErr)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
gArgs = getArgs(roachpb.Key("ccc"))
if reply, pErr := client.SendWrappedWith(ctx, store1.TestSender(), roachpb.Header{
RangeID: rhsRepl.RangeID,
}, gArgs); pErr != nil {
t.Fatal(pErr)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
// Put new values after the merge on both sides.
pArgs = putArgs(roachpb.Key("aaaa"), content)
if _, pErr := client.SendWrapped(ctx, store1.TestSender(), pArgs); pErr != nil {
t.Fatal(pErr)
}
pArgs = putArgs(roachpb.Key("cccc"), content)
if _, pErr := client.SendWrappedWith(ctx, store1.TestSender(), roachpb.Header{
RangeID: rhsRepl.RangeID,
}, pArgs); pErr != nil {
t.Fatal(pErr)
}
// Try to get the newly placed values.
gArgs = getArgs(roachpb.Key("aaaa"))
if reply, pErr := client.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil {
t.Fatal(pErr)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
gArgs = getArgs(roachpb.Key("cccc"))
if reply, pErr := client.SendWrapped(ctx, store1.TestSender(), gArgs); pErr != nil {
t.Fatal(pErr)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, content) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, content)
}
gArgs = getArgs(roachpb.Key("cccc"))
if _, pErr := client.SendWrappedWith(ctx, store2, roachpb.Header{
RangeID: rhsDesc.RangeID,
}, gArgs); !testutils.IsPError(
pErr, `r2 was not found`,
) {
t.Fatalf("expected get on rhs to fail after merge, but got err=%v", pErr)
}
if atomic.LoadInt64(&retries) >= 0 {
t.Fatalf("%d retries remaining (expected less than zero)", retries)
}
}

// TestStoreRangeMergeTimestampCache verifies that the timestamp cache on the
// LHS is properly updated after a merge.
func TestStoreRangeMergeTimestampCache(t *testing.T) {
defer leaktest.AfterTest(t)()
testutils.RunTrueAndFalse(t, "disjoint-leaseholders", mergeCheckingTimestampCaches)
}
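
// mergeCheckingTimestampCaches merges two ranges and verifies that reads and
// transaction aborts served by the RHS before the merge are reflected in the
// LHS's timestamp cache afterwards. If disjointLeaseholders is true, the LHS
// and RHS leases are placed on different stores before the merge.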
func mergeCheckingTimestampCaches(t *testing.T, disjointLeaseholders bool) {
ctx := context.Background()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableMergeQueue = true
mtc := &multiTestContext{storeConfig: &storeCfg}
var lhsStore, rhsStore *storage.Store
if disjointLeaseholders {
mtc.Start(t, 2)
lhsStore, rhsStore = mtc.Store(0), mtc.Store(1)
} else {
mtc.Start(t, 1)
lhsStore, rhsStore = mtc.Store(0), mtc.Store(0)
}
defer mtc.Stop()
lhsDesc, rhsDesc, err := createSplitRanges(ctx, lhsStore)
if err != nil {
t.Fatal(err)
}
if disjointLeaseholders {
mtc.replicateRange(lhsDesc.RangeID, 1)
mtc.replicateRange(rhsDesc.RangeID, 1)
mtc.transferLease(ctx, rhsDesc.RangeID, 0, 1)
testutils.SucceedsSoon(t, func() error {
rhsRepl, err := rhsStore.GetReplica(rhsDesc.RangeID)
if err != nil {
return err
}
if !rhsRepl.OwnsValidLease(mtc.clock.Now()) {
return errors.New("rhs store does not own valid lease for rhs range")
}
return nil
})
}
// Write a key to the RHS.
rhsKey := roachpb.Key("c")
if _, pErr := client.SendWrappedWith(ctx, rhsStore, roachpb.Header{
RangeID: rhsDesc.RangeID,
}, incrementArgs(rhsKey, 1)); pErr != nil {
t.Fatal(pErr)
}
readTS := mtc.clock.Now()
// Simulate a read on the RHS from a node with a newer clock.
var ba roachpb.BatchRequest
ba.Timestamp = readTS
ba.RangeID = rhsDesc.RangeID
ba.Add(getArgs(rhsKey))
if br, pErr := rhsStore.Send(ctx, ba); pErr != nil {
t.Fatal(pErr)
} else if v, err := br.Responses[0].GetGet().Value.GetInt(); err != nil {
t.Fatal(err)
} else if v != 1 {
t.Fatalf("expected 1, but got %d", v)
} else if br.Timestamp != readTS {
t.Fatalf("expected read to execute at %v, but executed at %v", readTS, br.Timestamp)
}
// Simulate a txn abort on the RHS from a node with a newer clock. Because
// the transaction record for the pushee was not yet written, this will bump
// the write timestamp cache to record the abort.
pushee := roachpb.MakeTransaction("pushee", rhsKey, roachpb.MinUserPriority, readTS, 0)
pusher := roachpb.MakeTransaction("pusher", rhsKey, roachpb.MaxUserPriority, readTS, 0)
ba = roachpb.BatchRequest{}
ba.Timestamp = readTS
ba.RangeID = rhsDesc.RangeID
ba.Add(pushTxnArgs(&pusher, &pushee, roachpb.PUSH_ABORT))
if br, pErr := rhsStore.Send(ctx, ba); pErr != nil {
t.Fatal(pErr)
} else if txn := br.Responses[0].GetPushTxn().PusheeTxn; txn.Status != roachpb.ABORTED {
t.Fatalf("expected aborted pushee, but got %v", txn)
}
// Merge the RHS back into the LHS.
args := adminMergeArgs(lhsDesc.StartKey.AsRawKey())
if _, pErr := client.SendWrapped(ctx, lhsStore.TestSender(), args); pErr != nil {
t.Fatal(pErr)
}
// After the merge, attempt to write under the read. The batch should get
// forwarded to a timestamp after the read.
ba = roachpb.BatchRequest{}
ba.Timestamp = readTS
ba.RangeID = lhsDesc.RangeID
ba.Add(incrementArgs(rhsKey, 1))
if br, pErr := lhsStore.Send(ctx, ba); pErr != nil {
t.Fatal(pErr)
} else if !readTS.Less(br.Timestamp) {
t.Fatalf("expected write to execute after %v, but executed at %v", readTS, br.Timestamp)
}
// Attempt to create a transaction record for the pushee transaction, which
// was aborted before the merge. This should be rejected with a transaction
// aborted error. The reason will depend on whether the leaseholders were
// disjoint or not because disjoint leaseholders will lead to a loss of
// resolution in the timestamp cache. Either way though, the transaction
// should not be allowed to create its record.
hb, hbH := heartbeatArgs(&pushee, mtc.clock.Now())
ba = roachpb.BatchRequest{}
ba.Header = hbH
ba.RangeID = lhsDesc.RangeID
ba.Add(hb)
var expReason roachpb.TransactionAbortedReason
if disjointLeaseholders {
expReason = roachpb.ABORT_REASON_TIMESTAMP_CACHE_REJECTED_POSSIBLE_REPLAY
} else {
expReason = roachpb.ABORT_REASON_ABORTED_RECORD_FOUND
}
if _, pErr := lhsStore.Send(ctx, ba); pErr == nil {
t.Fatalf("expected TransactionAbortedError(%s) but got %v", expReason, pErr)
} else if abortErr, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); !ok {
t.Fatalf("expected TransactionAbortedError(%s) but got %v", expReason, pErr)
} else if abortErr.Reason != expReason {
t.Fatalf("expected TransactionAbortedError(%s) but got %v", expReason, pErr)
}
}

// TestStoreRangeMergeTimestampCacheCausality verifies that range merges update
// the clock on the subsuming store as necessary to preserve causality.
//
// The test simulates a particularly diabolical sequence of events in which
// causality information is not communicated through the normal channels.
// Suppose two adjacent ranges, A and B, are collocated on S2, S3, and S4. (S1
// is omitted for consistency with the store numbering in the test itself.) S3
// holds the lease on A, while S4 holds the lease on B. Every store's clock
// starts at time T1.
//
// To merge A and B, S3 will launch a merge transaction that sends several RPCs
// to S4. Suppose that, just before S4 begins executing the Subsume request, a
// read sneaks in for some key K at a large timestamp T3. S4 will bump its clock
// from T1 to T3, so when the Subsume goes to determine the current time to use
// for the FreezeStart field in the Subsume response, it will use T3. When S3
// completes the merge, it will thus use T3 as the timestamp cache's low water
// mark for the keys that previously belonged to B.
//
// Importantly, S3 must also update its clock from T1 to T3. Otherwise, as this
// test demonstrates, it is possible for S3 to send a lease to another store, in
// this case S2, that begins at T2. S2 will then assume it is free to accept a
// write at T2, when in fact we already served a read at T3. This would be a
// serializability violation!
//
// Note that there are several mechanisms that *almost* prevent this problem. If
// the read of K at T3 occurs slightly earlier, the batch response for Subsume
// will set the Now field to T3, which S3 will use to bump its clock.
// (BatchResponse.Now is computed when the batch is received, not when it
// finishes executing.) If S3 receives a write for K at T2, it will a) properly
// bump the write to T4, because its timestamp cache is up to date, and then b)
// bump its clock to T4. Or if S4 were to send a single RPC to S3, S3 would bump
// its clock based on the BatchRequest.Timestamp.
//
// In short, this sequence of events is exceedingly unlikely in practice, but
// it is subtle enough to warrant a test.
func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
storeCfg := storage.TestStoreConfig(nil /* clock */)
storeCfg.TestingKnobs.DisableMergeQueue = true
mtc := &multiTestContext{storeConfig: &storeCfg}
var readTS hlc.Timestamp
rhsKey := roachpb.Key("c")
mtc.storeConfig.TestingKnobs.TestingRequestFilter = func(ba roachpb.BatchRequest) *roachpb.Error {
if ba.IsSingleSubsumeRequest() {
// Before we execute a Subsume request, execute a read on the same store
// at a much higher timestamp.
gba := roachpb.BatchRequest{}
gba.RangeID = ba.RangeID
gba.Timestamp = ba.Timestamp.Add(42 /* wallTime */, 0 /* logical */)
gba.Add(getArgs(rhsKey))
store := mtc.Store(int(ba.Header.Replica.StoreID - 1))
gbr, pErr := store.Send(ctx, gba)
if pErr != nil {
t.Error(pErr) // different goroutine, so can't use t.Fatal
}
readTS = gbr.Timestamp
}
return nil
}
for i := 0; i < 4; i++ {
clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano, time.Millisecond /* maxOffset */)
mtc.clocks = append(mtc.clocks, clock)
}
mtc.Start(t, 4)
defer mtc.Stop()
distSender := mtc.distSenders[0]
for _, key := range []roachpb.Key{roachpb.Key("a"), roachpb.Key("b")} {
if _, pErr := client.SendWrapped(ctx, distSender, adminSplitArgs(key)); pErr != nil {
t.Fatal(pErr)
}
}
lhsRangeID := mtc.Store(0).LookupReplica(roachpb.RKey("a")).RangeID
rhsRangeID := mtc.Store(0).LookupReplica(roachpb.RKey("b")).RangeID
// Replicate [a, b) to s2, s3, and s4, and put the lease on s3.
mtc.replicateRange(lhsRangeID, 1, 2, 3)
mtc.transferLease(ctx, lhsRangeID, 0, 2)
mtc.unreplicateRange(lhsRangeID, 0)
// Replicate [b, Max) to s2, s3, and s4, and put the lease on s4.
mtc.replicateRange(rhsRangeID, 1, 2, 3)
mtc.transferLease(ctx, rhsRangeID, 0, 3)
mtc.unreplicateRange(rhsRangeID, 0)
// N.B. We isolate r1 on s1 so that node liveness heartbeats do not interfere
// with our precise clock management on s2, s3, and s4.
// Write a key to [b, Max).
if _, pErr := client.SendWrapped(ctx, distSender, incrementArgs(rhsKey, 1)); pErr != nil {
t.Fatal(pErr)
}
// Wait for all relevant stores to have the same value. This indirectly
// ensures the lease transfers have applied on all relevant stores.
mtc.waitForValues(rhsKey, []int64{0, 1, 1, 1})
// Merge [a, b) and [b, Max). Our request filter above will intercept the
// merge and execute a read with a large timestamp immediately before the
// Subsume request executes.
if _, pErr := client.SendWrappedWith(ctx, mtc.Store(2), roachpb.Header{
RangeID: lhsRangeID,
}, adminMergeArgs(roachpb.Key("a"))); pErr != nil {
t.Fatal(pErr)
}
// Immediately transfer the lease on the merged range [a, Max) from s3 to s2.
// To test that it is, in fact, the merge trigger that properly bumps s3's
// clock, s3 must not send or receive any requests before it transfers the
// lease, as those requests could bump s3's clock through other code paths.
mtc.transferLease(ctx, lhsRangeID, 2, 1)
testutils.SucceedsSoon(t, func() error {
lhsRepl1, err := mtc.Store(1).GetReplica(lhsRangeID)
if err != nil {
return err
}
if !lhsRepl1.OwnsValidLease(mtc.clocks[1].Now()) {
return errors.New("s2 does not own valid lease for lhs range")
}
return nil
})
// Attempt to write at the same time as the read. The write's timestamp
// should be forwarded to after the read.
ba := roachpb.BatchRequest{}
ba.Timestamp = readTS
ba.RangeID = lhsRangeID
ba.Add(incrementArgs(rhsKey, 1))
if br, pErr := mtc.Store(1).Send(ctx, ba); pErr != nil {
t.Fatal(pErr)
} else if !readTS.Less(br.Timestamp) {
t.Fatalf("expected write to execute after %v, but executed at %v", readTS, br.Timestamp)
}
}

// TestStoreRangeMergeLastRange verifies that merging the last range fails.
func TestStoreRangeMergeLastRange(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
mtc := multiTestContext{
// This test was written before the multiTestContext started creating many
// system ranges at startup, and hasn't been updated to take that into
// account.
startWithSingleRange: true,
}
mtc.Start(t, 1)
defer mtc.Stop()
store := mtc.Store(0)
// Merge last range.
_, pErr := client.SendWrapped(ctx, store.TestSender(), adminMergeArgs(roachpb.KeyMin))
if !testutils.IsPError(pErr, "cannot merge final range") {
t.Fatalf("expected 'cannot merge final range' error; got %s", pErr)
}
}
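
// TestStoreRangeMergeTxnFailure verifies that the LHS and RHS both remain
// fully available when the merge transaction retries and ultimately aborts.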
func TestStoreRangeMergeTxnFailure(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
storeCfg.TestingKnobs.DisableMergeQueue = true
// Install a store filter that maybe injects retryable errors into a merge
// transaction before ultimately aborting the merge.
var retriesBeforeFailure int64
storeCfg.TestingKnobs.TestingRequestFilter = func(ba roachpb.BatchRequest) *roachpb.Error {
for _, req := range ba.Requests {
if et := req.GetEndTransaction(); et != nil && et.InternalCommitTrigger.GetMergeTrigger() != nil {
if atomic.AddInt64(&retriesBeforeFailure, -1) >= 0 {
return roachpb.NewError(
roachpb.NewTransactionRetryError(roachpb.RETRY_SERIALIZABLE, "filter err"))
}
return roachpb.NewError(errors.New("injected permafail"))
}
}
return nil
}
mtc := &multiTestContext{storeConfig: &storeCfg}
mtc.Start(t, 1)
defer mtc.Stop()
store := mtc.Store(0)
kvDB := store.DB()
if err := kvDB.Put(ctx, "aa", "val"); err != nil {
t.Fatal(err)
}
if err := kvDB.Put(ctx, "cc", "val"); err != nil {
t.Fatal(err)
}
lhsDesc, rhsDesc, err := createSplitRanges(ctx, store)
if err != nil {
t.Fatal(err)
}
verifyLHSAndRHSLive := func() {
t.Helper()
for _, tc := range []struct {
rangeID roachpb.RangeID
key roachpb.Key
}{
{lhsDesc.RangeID, roachpb.Key("aa")},
{rhsDesc.RangeID, roachpb.Key("cc")},
} {
if reply, pErr := client.SendWrappedWith(ctx, store.TestSender(), roachpb.Header{
RangeID: tc.rangeID,
}, getArgs(tc.key)); pErr != nil {
t.Fatal(pErr)
} else if replyBytes, err := reply.(*roachpb.GetResponse).Value.GetBytes(); err != nil {
t.Fatal(err)
} else if !bytes.Equal(replyBytes, []byte("val")) {
t.Fatalf("actual value %q did not match expected value %q", replyBytes, []byte("val"))
}
}
}
attemptMerge := func() {
t.Helper()
args := adminMergeArgs(lhsDesc.StartKey.AsRawKey())
_, pErr := client.SendWrapped(ctx, store.TestSender(), args)
if exp := "injected permafail"; !testutils.IsPError(pErr, exp) {
t.Fatalf("expected %q error, but got %q", exp, pErr)
}
}
verifyLHSAndRHSLive()
atomic.StoreInt64(&retriesBeforeFailure, 0)
attemptMerge()
verifyLHSAndRHSLive()
if atomic.LoadInt64(&retriesBeforeFailure) >= 0 {
t.Fatalf("%d retries remaining (expected less than zero)", retriesBeforeFailure)
}
atomic.StoreInt64(&retriesBeforeFailure, 3)
attemptMerge()
verifyLHSAndRHSLive()
if atomic.LoadInt64(&retriesBeforeFailure) >= 0 {
t.Fatalf("%d retries remaining (expected less than zero)", retriesBeforeFailure)
}
}

// TestStoreRangeSplitMergeGeneration verifies that splits and merges both
// update the range descriptor generations of the involved ranges according to
// the comment on the RangeDescriptor.Generation field.
func TestStoreRangeSplitMergeGeneration(t *testing.T) {
defer leaktest.AfterTest(t)()
testutils.RunTrueAndFalse(t, "rhsHasHigherGen", func(t *testing.T, rhsHasHigherGen bool) {
s, _, _ := serverutils.StartServer(t, base.TestServerArgs{
Knobs: base.TestingKnobs{
Store: &storage.StoreTestingKnobs{
// Disable both splits and merges so that we're in full
// control over them.
DisableMergeQueue: true,
DisableSplitQueue: true,
},
},
})
defer s.Stopper().Stop(context.TODO())
leftKey := roachpb.Key("z")
rightKey := leftKey.Next().Next()
// First, split at the left key for convenience, so that we can check
// leftDesc.StartKey == leftKey later.
_, _, err := s.SplitRange(leftKey)
assert.NoError(t, err)
store, err := s.GetStores().(*storage.Stores).GetStore(s.GetFirstStoreID())
assert.NoError(t, err)
leftRepl := store.LookupReplica(keys.MustAddr(leftKey))
assert.NotNil(t, leftRepl)
preSplitGen := leftRepl.Desc().GetGeneration()
leftDesc, rightDesc, err := s.SplitRange(rightKey)
assert.NoError(t, err)
// Split should increment the LHS' generation and also propagate the result
// to the RHS.
assert.Equal(t, preSplitGen+1, leftDesc.GetGeneration())
assert.Equal(t, preSplitGen+1, rightDesc.GetGeneration())
if rhsHasHigherGen {
// Split the RHS again to increment its generation once more, so that
// we get (assuming preSplitGen=1):
//
// |--left@2---||---right@3---||--don't care--|
//
rightDesc, _, err = s.SplitRange(rightKey.Next())
assert.NoError(t, err)
assert.Equal(t, preSplitGen+2, rightDesc.GetGeneration())
} else {
// Split and merge the LHS to increment the generation (it ends up
// being incremented by two). Note that leftKey.Next() is still in
// the left range. Assuming preSplitGen=1, we'll end up in the
// situation:
//
// |--left@4---||---right@2---|
var tmpRightDesc roachpb.RangeDescriptor
leftDesc, tmpRightDesc, err = s.SplitRange(leftKey.Next())
assert.Equal(t, preSplitGen+2, leftDesc.GetGeneration())
assert.Equal(t, preSplitGen+2, tmpRightDesc.GetGeneration())
assert.NoError(t, err)
leftDesc, err = s.MergeRanges(leftKey)
assert.NoError(t, err)
assert.Equal(t, preSplitGen+3, leftDesc.GetGeneration())
}
// Make sure the split/merge shenanigans above didn't get the range
// descriptors confused.
assert.Equal(t, leftKey, leftDesc.StartKey.AsRawKey())
assert.Equal(t, rightKey, rightDesc.StartKey.AsRawKey())
// Merge the two ranges back to verify that the resulting descriptor
// has the correct generation.
mergedDesc, err := s.MergeRanges(leftKey)
assert.NoError(t, err)
maxPreMergeGen := leftDesc.GetGeneration()
if rhsGen := rightDesc.GetGeneration(); rhsGen > maxPreMergeGen {
maxPreMergeGen = rhsGen
}
assert.Equal(t, maxPreMergeGen+1, mergedDesc.GetGeneration())
assert.Equal(t, leftDesc.RangeID, mergedDesc.RangeID)
})
}

// TestStoreRangeMergeStats starts by splitting a range, then writing random
// data to both sides of the split. It then merges the ranges and verifies the
// merged range has stats consistent with recomputations.
func TestStoreRangeMergeStats(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableMergeQueue = true
mtc := &multiTestContext{storeConfig: &storeCfg}
mtc.Start(t, 1)
defer mtc.Stop()
store := mtc.Store(0)
// Split the range.
lhsDesc, rhsDesc, err := createSplitRanges(ctx, store)
if err != nil {
t.Fatal(err)
}
// Write some values left and right of the proposed split key.
storage.WriteRandomDataToRange(t, store, lhsDesc.RangeID, []byte("aaa"))
storage.WriteRandomDataToRange(t, store, rhsDesc.RangeID, []byte("ccc"))
// Litter some abort span records. txn1 will leave a record on the LHS, txn2
// will leave a record on the RHS, and txn3 will leave a record on both. This
// tests whether the merge code properly accounts for merging abort span
// records for the same transaction.
txn1 := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */, client.RootTxn)
if err := txn1.Put(ctx, "a-txn1", "val"); err != nil {
t.Fatal(err)
}
txn2 := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */, client.RootTxn)
if err := txn2.Put(ctx, "c-txn2", "val"); err != nil {
t.Fatal(err)
}
txn3 := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */, client.RootTxn)
if err := txn3.Put(ctx, "a-txn3", "val"); err != nil {
t.Fatal(err)
}
if err := txn3.Put(ctx, "c-txn3", "val"); err != nil {
t.Fatal(err)
}
hiPriTxn := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */, client.RootTxn)
hiPriTxn.InternalSetPriority(enginepb.MaxTxnPriority)
for _, key := range []string{"a-txn1", "c-txn2", "a-txn3", "c-txn3"} {
if err := hiPriTxn.Put(ctx, key, "val"); err != nil {
t.Fatal(err)
}
}
if err := hiPriTxn.Commit(ctx); err != nil {
t.Fatal(err)
}
// Leave txn1-txn3 open so that their abort span records exist during the
// merge below.
// Get the range stats for both ranges now that we have data.
snap := store.Engine().NewSnapshot()
defer snap.Close()
msA, err := stateloader.Make(lhsDesc.RangeID).LoadMVCCStats(ctx, snap)
if err != nil {
t.Fatal(err)
}
msB, err := stateloader.Make(rhsDesc.RangeID).LoadMVCCStats(ctx, snap)
if err != nil {
t.Fatal(err)
}
// Stats should agree with recomputation.
if err := verifyRecomputedStats(snap, lhsDesc, msA, mtc.manualClock.UnixNano()); err != nil {
t.Fatalf("failed to verify range A's stats before split: %+v", err)
}
if err := verifyRecomputedStats(snap, rhsDesc, msB, mtc.manualClock.UnixNano()); err != nil {
t.Fatalf("failed to verify range B's stats before split: %+v", err)
}
mtc.manualClock.Increment(100)
// Merge the b range back into the a range.
args := adminMergeArgs(lhsDesc.StartKey.AsRawKey())
if _, err := client.SendWrapped(ctx, store.TestSender(), args); err != nil {
t.Fatal(err)
}
replMerged := store.LookupReplica(lhsDesc.StartKey)
// Get the range stats for the merged range and verify.
snap = store.Engine().NewSnapshot()
defer snap.Close()
msMerged, err := stateloader.Make(replMerged.RangeID).LoadMVCCStats(ctx, snap)
if err != nil {
t.Fatal(err)
}
// Merged stats should agree with recomputation.
nowNanos := mtc.manualClock.UnixNano()
msMerged.AgeTo(nowNanos)
if err := verifyRecomputedStats(snap, replMerged.Desc(), msMerged, nowNanos); err != nil {
t.Errorf("failed to verify range's stats after merge: %+v", err)
}
}
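
// TestStoreRangeMergeInFlightTxns verifies that transactions which are in
// flight while a merge executes continue to operate correctly afterwards.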
func TestStoreRangeMergeInFlightTxns(t *testing.T) {
defer leaktest.AfterTest(t)()
ctx := context.Background()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableReplicateQueue = true
storeCfg.TestingKnobs.DisableMergeQueue = true
mtc := &multiTestContext{storeConfig: &storeCfg}
mtc.Start(t, 1)
defer mtc.Stop()
store := mtc.Store(0)
// Create two adjacent ranges.
setupReplicas := func() (lhsDesc, rhsDesc *roachpb.RangeDescriptor, err error) {
lhsDesc, rhsDesc, err = createSplitRanges(ctx, store)
if err != nil {
return nil, nil, err
}
return lhsDesc, rhsDesc, nil
}
// Verify that a transaction can span a merge.
t.Run("valid", func(t *testing.T) {
lhsDesc, _, err := setupReplicas()
if err != nil {
t.Fatal(err)
}
lhsKey, rhsKey := roachpb.Key("aa"), roachpb.Key("cc")
txn := client.NewTxn(ctx, store.DB(), 0 /* gatewayNodeID */, client.RootTxn)
// Put the key on the RHS first so ownership of the transaction record
// will need to transfer to the LHS range during the merge.
if err := txn.Put(ctx, rhsKey, t.Name()); err != nil {
t.Fatal(err)