diff --git a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go index 86f6a0d8c8bd..bd0b212fd68b 100644 --- a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go +++ b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go @@ -275,7 +275,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) { require.Equal(t, []roachpb.ReplicaDescriptor{ {NodeID: 1, StoreID: 1, ReplicaID: 1}, {NodeID: 2, StoreID: 2, ReplicaID: 2}, - }, entry.Desc().Replicas().All()) + }, entry.Desc().Replicas().Descriptors()) // Relocate the follower. n2 will no longer have a replica. n1.Exec(t, `ALTER TABLE test EXPERIMENTAL_RELOCATE VALUES (ARRAY[1,3], 1)`) @@ -296,7 +296,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) { require.Equal(t, []roachpb.ReplicaDescriptor{ {NodeID: 1, StoreID: 1, ReplicaID: 1}, {NodeID: 3, StoreID: 3, ReplicaID: 3}, - }, entry.Desc().Replicas().All()) + }, entry.Desc().Replicas().Descriptors()) // Make a note of the follower reads metric on n3. We'll check that it was // incremented. diff --git a/pkg/cli/debug.go b/pkg/cli/debug.go index 6647b9c4c6bb..17b75d2ddda6 100644 --- a/pkg/cli/debug.go +++ b/pkg/cli/debug.go @@ -955,7 +955,7 @@ func removeDeadReplicas( err = kvserver.IterateRangeDescriptors(ctx, db, func(desc roachpb.RangeDescriptor) error { hasSelf := false numDeadPeers := 0 - allReplicas := desc.Replicas().All() + allReplicas := desc.Replicas().Descriptors() maxLivePeer := roachpb.StoreID(-1) for _, rep := range allReplicas { if rep.StoreID == storeIdent.StoreID { @@ -998,7 +998,7 @@ func removeDeadReplicas( StoreID: storeIdent.StoreID, ReplicaID: desc.NextReplicaID, }} - newDesc.SetReplicas(roachpb.MakeReplicaDescriptors(replicas)) + newDesc.SetReplicas(roachpb.MakeReplicaSet(replicas)) newDesc.NextReplicaID++ fmt.Printf("Replica %s -> %s\n", &desc, &newDesc) newDescs = append(newDescs, newDesc) diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_test.go index 6a0cf1f2e955..4869167f33b6 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_test.go @@ -586,7 +586,7 @@ func TestRetryOnNotLeaseHolderError(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() - recognizedLeaseHolder := testUserRangeDescriptor3Replicas.Replicas().Voters()[1] + recognizedLeaseHolder := testUserRangeDescriptor3Replicas.Replicas().VoterDescriptors()[1] unrecognizedLeaseHolder := roachpb.ReplicaDescriptor{ NodeID: 99, StoreID: 999, @@ -642,7 +642,7 @@ func TestRetryOnNotLeaseHolderError(t *testing.T) { clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) g := makeGossip(t, stopper, rpcContext) - for _, n := range testUserRangeDescriptor3Replicas.Replicas().Voters() { + for _, n := range testUserRangeDescriptor3Replicas.Replicas().VoterDescriptors() { require.NoError(t, g.AddInfoProto( gossip.MakeNodeIDKey(n.NodeID), newNodeDesc(n.NodeID), @@ -823,7 +823,7 @@ func TestDistSenderMovesOnFromReplicaWithStaleLease(t *testing.T) { clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) g := makeGossip(t, stopper, rpcContext) - for _, n := range testUserRangeDescriptor3Replicas.Replicas().Voters() { + for _, n := range testUserRangeDescriptor3Replicas.Replicas().VoterDescriptors() { require.NoError(t, g.AddInfoProto( gossip.MakeNodeIDKey(n.NodeID), newNodeDesc(n.NodeID), @@ -4329,17 +4329,17 @@ func 
TestDistSenderDescEvictionAfterLeaseUpdate(t *testing.T) { br := &roachpb.BatchResponse{} switch call { case 0: - expRepl := desc1.Replicas().All()[0] + expRepl := desc1.Replicas().Descriptors()[0] require.Equal(t, expRepl, ba.Replica) br.Error = roachpb.NewError(&roachpb.NotLeaseHolderError{ - Lease: &roachpb.Lease{Replica: desc1.Replicas().All()[1]}, + Lease: &roachpb.Lease{Replica: desc1.Replicas().Descriptors()[1]}, }) case 1: - expRep := desc1.Replicas().All()[1] + expRep := desc1.Replicas().Descriptors()[1] require.Equal(t, ba.Replica, expRep) br.Error = roachpb.NewError(roachpb.NewRangeNotFoundError(ba.RangeID, ba.Replica.StoreID)) case 2: - expRep := desc2.Replicas().All()[0] + expRep := desc2.Replicas().Descriptors()[0] require.Equal(t, ba.Replica, expRep) br = ba.CreateReply() default: @@ -4420,7 +4420,7 @@ func TestDistSenderRPCMetrics(t *testing.T) { br := &roachpb.BatchResponse{} if call == 0 { br.Error = roachpb.NewError(&roachpb.NotLeaseHolderError{ - Lease: &roachpb.Lease{Replica: desc.Replicas().All()[1]}, + Lease: &roachpb.Lease{Replica: desc.Replicas().Descriptors()[1]}, }) } else { br.Error = roachpb.NewError(&roachpb.ConditionFailedError{}) @@ -4449,7 +4449,7 @@ func TestDistSenderRPCMetrics(t *testing.T) { ds.rangeCache.Insert(ctx, roachpb.RangeInfo{ Desc: desc, Lease: roachpb.Lease{ - Replica: desc.Replicas().All()[0], + Replica: desc.Replicas().Descriptors()[0], }, }) var ba roachpb.BatchRequest diff --git a/pkg/kv/kvclient/kvcoord/replica_slice.go b/pkg/kv/kvclient/kvcoord/replica_slice.go index 7bfef63fb934..9617ecb63296 100644 --- a/pkg/kv/kvclient/kvcoord/replica_slice.go +++ b/pkg/kv/kvclient/kvcoord/replica_slice.go @@ -67,11 +67,11 @@ func NewReplicaSlice( } // Learner replicas won't serve reads/writes, so we'll send only to the - // `Voters` replicas. This is just an optimization to save a network hop, + // `VoterDescriptors` replicas. This is just an optimization to save a network hop, // everything would still work if we had `All` here. - voters := desc.Replicas().Voters() + voters := desc.Replicas().VoterDescriptors() // If we know a leaseholder, though, let's make sure we include it. - if leaseholder != nil && len(voters) < len(desc.Replicas().All()) { + if leaseholder != nil && len(voters) < len(desc.Replicas().Descriptors()) { found := false for _, v := range voters { if v == *leaseholder { diff --git a/pkg/kv/kvnemesis/applier.go b/pkg/kv/kvnemesis/applier.go index aa9844ad183c..305563e2ef2e 100644 --- a/pkg/kv/kvnemesis/applier.go +++ b/pkg/kv/kvnemesis/applier.go @@ -259,7 +259,7 @@ func newGetReplicasFn(dbs ...*kv.DB) GetReplicasFn { ctx := context.Background() return func(key roachpb.Key) []roachpb.ReplicationTarget { desc := getRangeDesc(ctx, key, dbs...) - replicas := desc.Replicas().All() + replicas := desc.Replicas().Descriptors() targets := make([]roachpb.ReplicationTarget, len(replicas)) for i, replica := range replicas { targets[i] = roachpb.ReplicationTarget{ diff --git a/pkg/kv/kvserver/allocator.go b/pkg/kv/kvserver/allocator.go index dfeff404672b..2dbe7c06a38d 100644 --- a/pkg/kv/kvserver/allocator.go +++ b/pkg/kv/kvserver/allocator.go @@ -346,7 +346,7 @@ func (a *Allocator) ComputeAction( // On the other hand if we get the race where a leaseholder starts adding a // replica in the replicate queue and during this loses its lease, it should // probably not retry. 
- if learners := desc.Replicas().Learners(); len(learners) > 0 { + if learners := desc.Replicas().LearnerDescriptors(); len(learners) > 0 { // TODO(dan): Since this goes before anything else, the priority here should // be influenced by whatever operations would happen right after the learner // is removed. In the meantime, we don't want to block something important @@ -356,7 +356,7 @@ func (a *Allocator) ComputeAction( return AllocatorRemoveLearner, removeLearnerReplicaPriority } // computeAction expects to operate only on voters. - return a.computeAction(ctx, zone, desc.Replicas().Voters()) + return a.computeAction(ctx, zone, desc.Replicas().VoterDescriptors()) } func (a *Allocator) computeAction( diff --git a/pkg/kv/kvserver/batcheval/cmd_lease_test.go b/pkg/kv/kvserver/batcheval/cmd_lease_test.go index e3c205b86158..ec707f9d4f8e 100644 --- a/pkg/kv/kvserver/batcheval/cmd_lease_test.go +++ b/pkg/kv/kvserver/batcheval/cmd_lease_test.go @@ -119,7 +119,7 @@ func TestLeaseCommandLearnerReplica(t *testing.T) { {NodeID: 2, StoreID: learnerStoreID, Type: roachpb.ReplicaTypeLearner(), ReplicaID: 2}, } desc := roachpb.RangeDescriptor{} - desc.SetReplicas(roachpb.MakeReplicaDescriptors(replicas)) + desc.SetReplicas(roachpb.MakeReplicaSet(replicas)) cArgs := CommandArgs{ EvalCtx: (&MockEvalCtx{ StoreID: voterStoreID, diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index 21e3dc1df5b3..aabb52444ae4 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -1992,11 +1992,11 @@ func TestStoreRangeMergeAddReplicaRace(t *testing.T) { `|cannot merge range with non-voter replicas` if mergeErr == nil && kvserver.IsRetriableReplicationChangeError(addErr) { // Merge won the race, no add happened. - require.Len(t, afterDesc.Replicas().Voters(), 1) + require.Len(t, afterDesc.Replicas().VoterDescriptors(), 1) require.Equal(t, origDesc.EndKey, afterDesc.EndKey) } else if addErr == nil && testutils.IsError(mergeErr, acceptableMergeErr) { // Add won the race, no merge happened. - require.Len(t, afterDesc.Replicas().Voters(), 2) + require.Len(t, afterDesc.Replicas().VoterDescriptors(), 2) require.Equal(t, beforeDesc.EndKey, afterDesc.EndKey) } else { t.Fatalf(`expected exactly one of merge or add to succeed got: [merge] %v [add] %v`, @@ -2032,7 +2032,7 @@ func TestStoreRangeMergeResplitAddReplicaRace(t *testing.T) { assert.Equal(t, origDesc.RangeID, resplitDesc.RangeID) assert.Equal(t, origDesc.StartKey, resplitDesc.StartKey) assert.Equal(t, origDesc.EndKey, resplitDesc.EndKey) - assert.Equal(t, origDesc.Replicas().All(), resplitDesc.Replicas().All()) + assert.Equal(t, origDesc.Replicas().Descriptors(), resplitDesc.Replicas().Descriptors()) assert.NotEqual(t, origDesc.Generation, resplitDesc.Generation) _, err := tc.Server(0).DB().AdminChangeReplicas( diff --git a/pkg/kv/kvserver/client_migration_test.go b/pkg/kv/kvserver/client_migration_test.go index 2796e18c4b40..a491eb5aa619 100644 --- a/pkg/kv/kvserver/client_migration_test.go +++ b/pkg/kv/kvserver/client_migration_test.go @@ -151,8 +151,8 @@ func TestMigrateWithInflightSnapshot(t *testing.T) { // added. <-blockUntilSnapshotCh desc := tc.LookupRangeOrFatal(t, k) - require.Len(t, desc.Replicas().Voters(), 1) - require.Len(t, desc.Replicas().Learners(), 1) + require.Len(t, desc.Replicas().VoterDescriptors(), 1) + require.Len(t, desc.Replicas().LearnerDescriptors(), 1) // Enqueue the replica in the raftsnapshot queue. 
We use SucceedsSoon // because it may take a bit for raft to figure out that we need to be diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go index 75df6631cafa..d49a3d4a6927 100644 --- a/pkg/kv/kvserver/client_raft_test.go +++ b/pkg/kv/kvserver/client_raft_test.go @@ -3173,19 +3173,19 @@ func TestDecommission(t *testing.T) { requireNoReplicas := func(storeID roachpb.StoreID, repFactor int) { testutils.SucceedsSoon(t, func() error { desc := tc.LookupRangeOrFatal(t, k) - for _, rDesc := range desc.Replicas().Voters() { + for _, rDesc := range desc.Replicas().VoterDescriptors() { store, err := tc.Servers[int(rDesc.NodeID-1)].Stores().GetStore(rDesc.StoreID) require.NoError(t, err) if err := store.ForceReplicationScanAndProcess(); err != nil { return err } } - if sl := desc.Replicas().Filter(func(rDesc roachpb.ReplicaDescriptor) bool { + if sl := desc.Replicas().FilterToDescriptors(func(rDesc roachpb.ReplicaDescriptor) bool { return rDesc.StoreID == storeID }); len(sl) > 0 { return errors.Errorf("still a replica on s%d: %s", storeID, &desc) } - if len(desc.Replicas().Voters()) != repFactor { + if len(desc.Replicas().VoterDescriptors()) != repFactor { return errors.Errorf("expected %d replicas: %s", repFactor, &desc) } return nil diff --git a/pkg/kv/kvserver/client_relocate_range_test.go b/pkg/kv/kvserver/client_relocate_range_test.go index d7a2d5aacf4d..36dc40269544 100644 --- a/pkg/kv/kvserver/client_relocate_range_test.go +++ b/pkg/kv/kvserver/client_relocate_range_test.go @@ -56,7 +56,7 @@ func requireDescMembers( sort.Slice(targets, func(i, j int) bool { return targets[i].StoreID < targets[j].StoreID }) have := make([]roachpb.ReplicationTarget, 0, len(targets)) - for _, rDesc := range desc.Replicas().All() { + for _, rDesc := range desc.Replicas().Descriptors() { have = append(have, roachpb.ReplicationTarget{ NodeID: rDesc.NodeID, StoreID: rDesc.StoreID, diff --git a/pkg/kv/kvserver/client_replica_backpressure_test.go b/pkg/kv/kvserver/client_replica_backpressure_test.go index a5f6fb0ce3d8..a8132583e162 100644 --- a/pkg/kv/kvserver/client_replica_backpressure_test.go +++ b/pkg/kv/kvserver/client_replica_backpressure_test.go @@ -152,7 +152,7 @@ func TestBackpressureNotAppliedWhenReducingRangeSize(t *testing.T) { // replicas to move according to them. tc.ToggleReplicateQueues(false) defer tc.ToggleReplicateQueues(true) - voters := desc.Replicas().Voters() + voters := desc.Replicas().VoterDescriptors() if len(voters) == 1 && voters[0].NodeID == tc.Server(1).NodeID() { return nil } @@ -270,7 +270,7 @@ func TestBackpressureNotAppliedWhenReducingRangeSize(t *testing.T) { s, repl := getFirstStoreReplica(t, tc.Server(1), tablePrefix) s.SetReplicateQueueActive(false) - require.Len(t, repl.Desc().Replicas().All(), 1) + require.Len(t, repl.Desc().Replicas().Descriptors(), 1) // We really need to make sure that the split queue has hit this range, // otherwise we'll fail to backpressure. 
go func() { _ = s.ForceSplitScanAndProcess() }() diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index e814b28eb2df..37ace6142a7e 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -1950,7 +1950,7 @@ func TestSystemZoneConfigs(t *testing.T) { replicas := make(map[roachpb.RangeID]roachpb.RangeDescriptor) for _, s := range tc.Servers { if err := kvserver.IterateRangeDescriptors(ctx, s.Engines()[0], func(desc roachpb.RangeDescriptor) error { - if len(desc.Replicas().Learners()) > 0 { + if len(desc.Replicas().LearnerDescriptors()) > 0 { return fmt.Errorf("descriptor contains learners: %v", desc) } if existing, ok := replicas[desc.RangeID]; ok && !existing.Equal(&desc) { @@ -1964,7 +1964,7 @@ func TestSystemZoneConfigs(t *testing.T) { } var totalReplicas int for _, desc := range replicas { - totalReplicas += len(desc.Replicas().Voters()) + totalReplicas += len(desc.Replicas().VoterDescriptors()) } if totalReplicas != expectedReplicas { return fmt.Errorf("got %d voters, want %d; details: %+v", totalReplicas, expectedReplicas, replicas) diff --git a/pkg/kv/kvserver/client_test.go b/pkg/kv/kvserver/client_test.go index 06b14a6d79c4..1149c3e73c21 100644 --- a/pkg/kv/kvserver/client_test.go +++ b/pkg/kv/kvserver/client_test.go @@ -239,7 +239,7 @@ func createTestStoreWithOpts( var ba roachpb.BatchRequest get := roachpb.GetRequest{} get.Key = keys.LocalMax - ba.Header.Replica = repl.Desc().Replicas().Voters()[0] + ba.Header.Replica = repl.Desc().Replicas().VoterDescriptors()[0] ba.Header.RangeID = repl.RangeID ba.Add(&get) _, pErr := store.Send(ctx, ba) diff --git a/pkg/kv/kvserver/consistency_queue.go b/pkg/kv/kvserver/consistency_queue.go index 18ac209aa90d..2b40d619136f 100644 --- a/pkg/kv/kvserver/consistency_queue.go +++ b/pkg/kv/kvserver/consistency_queue.go @@ -137,7 +137,7 @@ func consistencyQueueShouldQueueImpl( } } // Check if all replicas are live. - for _, rep := range data.desc.Replicas().All() { + for _, rep := range data.desc.Replicas().Descriptors() { if live, err := data.isNodeLive(rep.NodeID); err != nil { log.VErrEventf(ctx, 3, "node %d liveness failed: %s", rep.NodeID, err) return false, 0 diff --git a/pkg/kv/kvserver/merge_queue.go b/pkg/kv/kvserver/merge_queue.go index 0df29629dfd6..7cd17218e7c4 100644 --- a/pkg/kv/kvserver/merge_queue.go +++ b/pkg/kv/kvserver/merge_queue.go @@ -287,7 +287,7 @@ func (mq *mergeQueue) process( return false, err } } - lhsReplicas, rhsReplicas := lhsDesc.Replicas().All(), rhsDesc.Replicas().All() + lhsReplicas, rhsReplicas := lhsDesc.Replicas().Descriptors(), rhsDesc.Replicas().Descriptors() // Defensive sanity check that everything is now a voter. 
for i := range lhsReplicas { diff --git a/pkg/kv/kvserver/raft_log_queue.go b/pkg/kv/kvserver/raft_log_queue.go index a7a562d93724..e342107eabd1 100644 --- a/pkg/kv/kvserver/raft_log_queue.go +++ b/pkg/kv/kvserver/raft_log_queue.go @@ -202,7 +202,7 @@ func newTruncateDecision(ctx context.Context, r *Replica) (truncateDecision, err log.Eventf(ctx, "raft status before lastUpdateTimes check: %+v", raftStatus.Progress) log.Eventf(ctx, "lastUpdateTimes: %+v", r.mu.lastUpdateTimes) updateRaftProgressFromActivity( - ctx, raftStatus.Progress, r.descRLocked().Replicas().All(), + ctx, raftStatus.Progress, r.descRLocked().Replicas().Descriptors(), func(replicaID roachpb.ReplicaID) bool { return r.mu.lastUpdateTimes.isFollowerActiveSince( ctx, replicaID, now, r.store.cfg.RangeLeaseActiveDuration()) diff --git a/pkg/kv/kvserver/replica.go b/pkg/kv/kvserver/replica.go index ce1f23fddd67..f030793295f5 100644 --- a/pkg/kv/kvserver/replica.go +++ b/pkg/kv/kvserver/replica.go @@ -865,7 +865,7 @@ func maxReplicaIDOfAny(desc *roachpb.RangeDescriptor) roachpb.ReplicaID { return 0 } var maxID roachpb.ReplicaID - for _, repl := range desc.Replicas().All() { + for _, repl := range desc.Replicas().Descriptors() { if repl.ReplicaID > maxID { maxID = repl.ReplicaID } @@ -1066,7 +1066,7 @@ func (r *Replica) State() kvserverpb.RangeInfo { if desc := ri.ReplicaState.Desc; desc != nil { // Learner replicas don't serve follower reads, but they still receive // closed timestamp updates, so include them here. - allReplicas := desc.Replicas().All() + allReplicas := desc.Replicas().Descriptors() for i := range allReplicas { replDesc := &allReplicas[i] r.store.cfg.ClosedTimestamp.Storage.VisitDescending(replDesc.NodeID, func(e ctpb.Entry) (done bool) { diff --git a/pkg/kv/kvserver/replica_command.go b/pkg/kv/kvserver/replica_command.go index 12cc8e03b326..ccd57ccfade1 100644 --- a/pkg/kv/kvserver/replica_command.go +++ b/pkg/kv/kvserver/replica_command.go @@ -593,7 +593,7 @@ func (r *Replica) AdminMerge( // replica of the RHS too early. The comment on // TestStoreRangeMergeUninitializedLHSFollower explains the situation in full. if err := waitForReplicasInit( - ctx, r.store.cfg.NodeDialer, origLeftDesc.RangeID, origLeftDesc.Replicas().All(), + ctx, r.store.cfg.NodeDialer, origLeftDesc.RangeID, origLeftDesc.Replicas().Descriptors(), ); err != nil { return errors.Wrap(err, "waiting for all left-hand replicas to initialize") } @@ -630,16 +630,16 @@ func (r *Replica) AdminMerge( predFullVoter := func(rDesc roachpb.ReplicaDescriptor) bool { return rDesc.GetType() == roachpb.VOTER_FULL } - if len(lReplicas.Filter(predFullVoter)) != len(lReplicas.All()) { + if len(lReplicas.FilterToDescriptors(predFullVoter)) != len(lReplicas.Descriptors()) { return errors.Errorf("cannot merge range with non-voter replicas on lhs: %s", lReplicas) } - if len(rReplicas.Filter(predFullVoter)) != len(rReplicas.All()) { + if len(rReplicas.FilterToDescriptors(predFullVoter)) != len(rReplicas.Descriptors()) { return errors.Errorf("cannot merge range with non-voter replicas on rhs: %s", rReplicas) } - if !replicaSetsEqual(lReplicas.All(), rReplicas.All()) { + if !replicaSetsEqual(lReplicas.Descriptors(), rReplicas.Descriptors()) { return errors.Errorf("ranges not collocated; %s != %s", lReplicas, rReplicas) } - mergeReplicas := lReplicas.All() + mergeReplicas := lReplicas.Descriptors() updatedLeftDesc := *origLeftDesc // lhs.Generation = max(rhs.Generation, lhs.Generation)+1. 
@@ -992,7 +992,7 @@ func (r *Replica) changeReplicasImpl( } // Queue the replica up into the raft snapshot queue so that the non-voters // that were added receive their first snapshot relatively soon. See the - // comment block above ReplicaDescriptors.NonVoters() for why we do this. + // comment block above ReplicaSet.NonVoterDescriptors() for why we do this. r.store.raftSnapshotQueue.AddAsync(ctx, r, raftSnapshotPriority) } @@ -1032,7 +1032,7 @@ func (r *Replica) changeReplicasImpl( // For all newly added nodes, first add raft learner replicas. They accept raft traffic // (so they can catch up) but don't get to vote (so they don't affect quorum and thus // don't introduce fragility into the system). For details see: - _ = roachpb.ReplicaDescriptors.Learners + _ = roachpb.ReplicaSet.LearnerDescriptors var err error desc, err = addRaftLearners(ctx, r.store, desc, reason, details, adds, internalChangeTypeAddLearner) if err != nil { @@ -1110,7 +1110,7 @@ func maybeLeaveAtomicChangeReplicasAndRemoveLearners( // Now the config isn't joint any more, but we may have demoted some voters // into learners. These learners should go as well. - learners := desc.Replicas().Learners() + learners := desc.Replicas().LearnerDescriptors() if len(learners) == 0 { return desc, nil } @@ -1178,7 +1178,7 @@ func validateReplicationChanges( // Then, check that we're not adding a second replica on nodes that already // have one, or "re-add" an existing replica. We delete from byNodeAndStoreID so that // after this loop, it contains only Nodes that we haven't seen in desc. - for _, rDesc := range desc.Replicas().All() { + for _, rDesc := range desc.Replicas().Descriptors() { byStoreID, ok := byNodeAndStoreID[rDesc.NodeID] if !ok { continue @@ -1231,7 +1231,7 @@ func validateReplicationChanges( // We're adding a replica that's already there. This isn't allowed, even // when the newly added one would be on a different store. if chg.ChangeType.IsAddition() { - if len(desc.Replicas().All()) > 1 { + if len(desc.Replicas().Descriptors()) > 1 { return errors.Mark( errors.Errorf("unable to add replica %v; node already has a replica in %s", chg.Target.StoreID, desc), errMarkInvalidReplicationChange) @@ -1626,7 +1626,7 @@ func prepareChangeReplicasTrigger( var isJoint bool // NB: the DeepCopy is needed or we'll skip over an entry every time we // call RemoveReplica below. 
- for _, rDesc := range updatedDesc.Replicas().DeepCopy().All() { + for _, rDesc := range updatedDesc.Replicas().DeepCopy().Descriptors() { switch rDesc.GetType() { case roachpb.VOTER_INCOMING: updatedDesc.SetReplicaType(rDesc.NodeID, rDesc.StoreID, roachpb.VOTER_FULL) @@ -1763,7 +1763,7 @@ func execChangeReplicasTxn( // See: // https://github.com/cockroachdb/cockroach/issues/54444#issuecomment-707706553 replicas := crt.Desc.Replicas() - liveReplicas, _ := args.liveAndDeadReplicas(replicas.All()) + liveReplicas, _ := args.liveAndDeadReplicas(replicas.Descriptors()) if !replicas.CanMakeProgress( func(rDesc roachpb.ReplicaDescriptor) bool { for _, inner := range liveReplicas { @@ -2362,8 +2362,8 @@ func (s *Store) AdminRelocateRange( func (s *Store) relocateOne( ctx context.Context, desc *roachpb.RangeDescriptor, targets []roachpb.ReplicationTarget, ) ([]roachpb.ReplicationChange, *roachpb.ReplicationTarget, error) { - rangeReplicas := desc.Replicas().All() - if len(rangeReplicas) != len(desc.Replicas().Voters()) { + rangeReplicas := desc.Replicas().Descriptors() + if len(rangeReplicas) != len(desc.Replicas().VoterDescriptors()) { // The caller removed all the learners, so there shouldn't be anything but // voters. return nil, nil, errors.AssertionFailedf( @@ -2600,8 +2600,8 @@ func (r *Replica) adminScatter( if args.RandomizeLeases && r.OwnsValidLease(ctx, r.store.Clock().Now()) { desc := r.Desc() // Learner replicas aren't allowed to become the leaseholder or raft leader, - // so only consider the `Voters` replicas. - voterReplicas := desc.Replicas().Voters() + // so only consider the `VoterDescriptors` replicas. + voterReplicas := desc.Replicas().VoterDescriptors() newLeaseholderIdx := rand.Intn(len(voterReplicas)) targetStoreID := voterReplicas[newLeaseholderIdx].StoreID if targetStoreID != r.store.StoreID() { diff --git a/pkg/kv/kvserver/replica_consistency.go b/pkg/kv/kvserver/replica_consistency.go index f0dd22c55f17..6bb5c39d2d56 100644 --- a/pkg/kv/kvserver/replica_consistency.go +++ b/pkg/kv/kvserver/replica_consistency.go @@ -283,12 +283,12 @@ func (r *Replica) CheckConsistency( } // args.Terminate is a slice of properly redactable values, but // with %v `redact` will not realize that and will redact the - // whole thing. Wrap it as a ReplicaDescriptors which is a SafeFormatter + // whole thing. Wrap it as a ReplicaSet which is a SafeFormatter // and will get the job done. // // TODO(knz): clean up after https://github.com/cockroachdb/redact/issues/5. { - var tmp redact.SafeFormatter = roachpb.MakeReplicaDescriptors(args.Terminate) + var tmp redact.SafeFormatter = roachpb.MakeReplicaSet(args.Terminate) log.Errorf(ctx, "consistency check failed; fetching details and shutting down minority %v", tmp) } @@ -361,7 +361,7 @@ func (r *Replica) RunConsistencyCheck( // Move the local replica to the front (which makes it the "master" // we're comparing against). - orderedReplicas = append(orderedReplicas, desc.Replicas().All()...) + orderedReplicas = append(orderedReplicas, desc.Replicas().Descriptors()...) sort.Slice(orderedReplicas, func(i, j int) bool { return orderedReplicas[i] == localReplica diff --git a/pkg/kv/kvserver/replica_learner_test.go b/pkg/kv/kvserver/replica_learner_test.go index ad5cfbcefcc8..a8960e1e118d 100644 --- a/pkg/kv/kvserver/replica_learner_test.go +++ b/pkg/kv/kvserver/replica_learner_test.go @@ -172,8 +172,8 @@ func TestAddReplicaViaLearner(t *testing.T) { // added. 
<-blockUntilSnapshotCh desc := tc.LookupRangeOrFatal(t, scratchStartKey) - require.Len(t, desc.Replicas().Voters(), 1) - require.Len(t, desc.Replicas().Learners(), 1) + require.Len(t, desc.Replicas().VoterDescriptors(), 1) + require.Len(t, desc.Replicas().LearnerDescriptors(), 1) var voters, nonVoters string db.QueryRow(t, @@ -188,8 +188,8 @@ func TestAddReplicaViaLearner(t *testing.T) { require.NoError(t, g.Wait()) desc = tc.LookupRangeOrFatal(t, scratchStartKey) - require.Len(t, desc.Replicas().Voters(), 2) - require.Len(t, desc.Replicas().Learners(), 0) + require.Len(t, desc.Replicas().VoterDescriptors(), 2) + require.Len(t, desc.Replicas().LearnerDescriptors(), 0) require.Equal(t, int64(1), getFirstStoreMetric(t, tc.Server(1), `range.snapshots.applied-initial`)) } @@ -230,13 +230,13 @@ func TestAddRemoveNonVotingReplicasBasic(t *testing.T) { require.NoError(t, g.Wait()) desc := tc.LookupRangeOrFatal(t, scratchStartKey) - require.Len(t, desc.Replicas().NonVoters(), 1) + require.Len(t, desc.Replicas().NonVoterDescriptors(), 1) _, err := tc.RemoveNonVoters(scratchStartKey, tc.Target(1)) require.NoError(t, err) desc = tc.LookupRangeOrFatal(t, scratchStartKey) require.NoError(t, tc.WaitForFullReplication()) - require.Len(t, desc.Replicas().NonVoters(), 0) + require.Len(t, desc.Replicas().NonVoterDescriptors(), 0) } func TestLearnerRaftConfState(t *testing.T) { @@ -299,8 +299,8 @@ func TestLearnerRaftConfState(t *testing.T) { ltk.withStopAfterLearnerAtomic(func() { desc = tc.AddVotersOrFatal(t, scratchStartKey, tc.Target(1)) }) - require.Len(t, desc.Replicas().Learners(), 1) - learnerReplicaID := desc.Replicas().Learners()[0].ReplicaID + require.Len(t, desc.Replicas().LearnerDescriptors(), 1) + learnerReplicaID := desc.Replicas().LearnerDescriptors()[0].ReplicaID // Verify that raft on every node thinks it's a learner. This checks that we // use ConfChangeAddLearnerNode in the ConfChange and also checks that we @@ -355,7 +355,7 @@ func TestLearnerSnapshotFailsRollback(t *testing.T) { // Make sure we cleaned up after ourselves (by removing the learner). desc := tc.LookupRangeOrFatal(t, scratchStartKey) - require.Empty(t, desc.Replicas().Learners()) + require.Empty(t, desc.Replicas().LearnerDescriptors()) } func TestSplitWithLearnerOrJointConfig(t *testing.T) { @@ -381,8 +381,8 @@ func TestSplitWithLearnerOrJointConfig(t *testing.T) { // replication queue will eventually clean this up. left, right, err := tc.SplitRange(scratchStartKey.Next()) require.NoError(t, err) - require.Len(t, left.Replicas().Learners(), 1) - require.Len(t, right.Replicas().Learners(), 1) + require.Len(t, left.Replicas().LearnerDescriptors(), 1) + require.Len(t, right.Replicas().LearnerDescriptors(), 1) // Remove the learner on the RHS. right = tc.RemoveVotersOrFatal(t, right.StartKey.AsRawKey(), tc.Target(1)) @@ -402,7 +402,7 @@ func TestSplitWithLearnerOrJointConfig(t *testing.T) { } return err }) - require.Len(t, right.Replicas().Filter(predIncoming), 1) + require.Len(t, right.Replicas().FilterToDescriptors(predIncoming), 1) left, right, err = tc.SplitRange(right.StartKey.AsRawKey().Next()) require.NoError(t, err) require.False(t, left.Replicas().InAtomicReplicationChange(), left) @@ -440,11 +440,11 @@ func TestReplicateQueueSeesLearnerOrJointConfig(t *testing.T) { // Make sure it deleted the learner. 
desc := tc.LookupRangeOrFatal(t, scratchStartKey) - require.Empty(t, desc.Replicas().Learners()) + require.Empty(t, desc.Replicas().LearnerDescriptors()) // Bonus points: the replicate queue keeps processing until there is nothing // to do, so it should have upreplicated the range to 3. - require.Len(t, desc.Replicas().Voters(), 3) + require.Len(t, desc.Replicas().VoterDescriptors(), 3) } // Create a VOTER_OUTGOING, i.e. a joint configuration. @@ -465,7 +465,7 @@ func TestReplicateQueueSeesLearnerOrJointConfig(t *testing.T) { desc = tc.LookupRangeOrFatal(t, scratchStartKey) require.False(t, desc.Replicas().InAtomicReplicationChange(), desc) // Queue processed again, so we're back to three replicas. - require.Len(t, desc.Replicas().Voters(), 3) + require.Len(t, desc.Replicas().VoterDescriptors(), 3) }) } @@ -499,14 +499,14 @@ func TestReplicaGCQueueSeesLearnerOrJointConfig(t *testing.T) { } desc := checkNoGC() // Make sure it didn't collect the learner. - require.NotEmpty(t, desc.Replicas().Learners()) + require.NotEmpty(t, desc.Replicas().LearnerDescriptors()) // Now get the range into a joint config. tc.RemoveVotersOrFatal(t, scratchStartKey, tc.Target(1)) // remove learner ltk.withStopAfterJointConfig(func() { desc = tc.AddVotersOrFatal(t, scratchStartKey, tc.Target(1)) - require.Len(t, desc.Replicas().Filter(predIncoming), 1, desc) + require.Len(t, desc.Replicas().FilterToDescriptors(predIncoming), 1, desc) }) postDesc := checkNoGC() @@ -612,8 +612,8 @@ func TestLearnerAdminChangeReplicasRace(t *testing.T) { _, err := tc.RemoveVoters(scratchStartKey, tc.Target(1)) require.NoError(t, err) desc := tc.LookupRangeOrFatal(t, scratchStartKey) - require.Len(t, desc.Replicas().Voters(), 1) - require.Len(t, desc.Replicas().Learners(), 0) + require.Len(t, desc.Replicas().VoterDescriptors(), 1) + require.Len(t, desc.Replicas().LearnerDescriptors(), 0) // Unblock the snapshot, and surprise AddVoters. It should retry and error // that the descriptor has changed since the AdminChangeReplicas command @@ -627,8 +627,8 @@ func TestLearnerAdminChangeReplicasRace(t *testing.T) { t.Fatalf(`expected %q error got: %+v`, msgRE, err) } desc = tc.LookupRangeOrFatal(t, scratchStartKey) - require.Len(t, desc.Replicas().Voters(), 1) - require.Len(t, desc.Replicas().Learners(), 0) + require.Len(t, desc.Replicas().VoterDescriptors(), 1) + require.Len(t, desc.Replicas().LearnerDescriptors(), 0) } // This test verifies the result of a race between the replicate queue running @@ -703,16 +703,16 @@ func TestLearnerReplicateQueueRace(t *testing.T) { // leaving the 2 voters. desc, err := tc.RemoveVoters(scratchStartKey, tc.Target(2)) require.NoError(t, err) - require.Len(t, desc.Replicas().Voters(), 2) - require.Len(t, desc.Replicas().Learners(), 0) + require.Len(t, desc.Replicas().VoterDescriptors(), 2) + require.Len(t, desc.Replicas().LearnerDescriptors(), 0) // Unblock the snapshot, and surprise the replicate queue. It should retry, // get a descriptor changed error, and realize it should stop. close(blockSnapshotsCh) require.NoError(t, <-queue1ErrCh) desc = tc.LookupRangeOrFatal(t, scratchStartKey) - require.Len(t, desc.Replicas().Voters(), 2) - require.Len(t, desc.Replicas().Learners(), 0) + require.Len(t, desc.Replicas().VoterDescriptors(), 2) + require.Len(t, desc.Replicas().LearnerDescriptors(), 0) } func TestLearnerNoAcceptLease(t *testing.T) { @@ -840,7 +840,7 @@ func TestLearnerAndJointConfigFollowerRead(t *testing.T) { // Re-add the voter and remain in joint config. 
require.True(t, scratchDesc.Replicas().InAtomicReplicationChange(), scratchDesc) - require.Len(t, scratchDesc.Replicas().Filter(predIncoming), 1) + require.Len(t, scratchDesc.Replicas().FilterToDescriptors(predIncoming), 1) // Can't serve follower read from the VOTER_INCOMING. check() @@ -848,7 +848,7 @@ func TestLearnerAndJointConfigFollowerRead(t *testing.T) { // Remove the voter and remain in joint config. scratchDesc = tc.RemoveVotersOrFatal(t, scratchStartKey, tc.Target(1)) require.True(t, scratchDesc.Replicas().InAtomicReplicationChange(), scratchDesc) - require.Len(t, scratchDesc.Replicas().Filter(predDemoting), 1) + require.Len(t, scratchDesc.Replicas().FilterToDescriptors(predDemoting), 1) // Can't serve follower read from the VOTER_OUTGOING. check() @@ -878,16 +878,16 @@ func TestLearnerOrJointConfigAdminRelocateRange(t *testing.T) { check := func(targets []roachpb.ReplicationTarget) { require.NoError(t, tc.Server(0).DB().AdminRelocateRange(ctx, scratchStartKey, targets)) desc := tc.LookupRangeOrFatal(t, scratchStartKey) - voters := desc.Replicas().Voters() + voters := desc.Replicas().VoterDescriptors() require.Len(t, voters, len(targets)) sort.Slice(voters, func(i, j int) bool { return voters[i].NodeID < voters[j].NodeID }) for i := range voters { require.Equal(t, targets[i].NodeID, voters[i].NodeID, `%v`, voters) require.Equal(t, targets[i].StoreID, voters[i].StoreID, `%v`, voters) } - require.Empty(t, desc.Replicas().Learners()) - require.Empty(t, desc.Replicas().Filter(predIncoming)) - require.Empty(t, desc.Replicas().Filter(predOutgoing)) + require.Empty(t, desc.Replicas().LearnerDescriptors()) + require.Empty(t, desc.Replicas().FilterToDescriptors(predIncoming)) + require.Empty(t, desc.Replicas().FilterToDescriptors(predOutgoing)) } // Test AdminRelocateRange's treatment of learners by having one that it has @@ -905,7 +905,7 @@ func TestLearnerOrJointConfigAdminRelocateRange(t *testing.T) { atomic.StoreInt64(<k.replicationAlwaysUseJointConfig, 1) desc := tc.RemoveVotersOrFatal(t, scratchStartKey, tc.Target(3)) require.True(t, desc.Replicas().InAtomicReplicationChange(), desc) - require.Len(t, desc.Replicas().Filter(predDemoting), 1) + require.Len(t, desc.Replicas().FilterToDescriptors(predDemoting), 1) atomic.StoreInt64(<k.replicaAddStopAfterJointConfig, 0) check([]roachpb.ReplicationTarget{tc.Target(0), tc.Target(1), tc.Target(2)}) } @@ -961,10 +961,10 @@ func TestLearnerAndJointConfigAdminMerge(t *testing.T) { atomic.StoreInt64(<k.replicationAlwaysUseJointConfig, 1) desc1 = tc.RemoveVotersOrFatal(t, desc1.StartKey.AsRawKey(), tc.Target(1)) desc1 = tc.AddVotersOrFatal(t, desc1.StartKey.AsRawKey(), tc.Target(1)) - require.Len(t, desc1.Replicas().Filter(predIncoming), 1) + require.Len(t, desc1.Replicas().FilterToDescriptors(predIncoming), 1) desc3 = tc.RemoveVotersOrFatal(t, desc3.StartKey.AsRawKey(), tc.Target(1)) desc3 = tc.AddVotersOrFatal(t, desc3.StartKey.AsRawKey(), tc.Target(1)) - require.Len(t, desc1.Replicas().Filter(predIncoming), 1) + require.Len(t, desc1.Replicas().FilterToDescriptors(predIncoming), 1) // VOTER_INCOMING on the lhs or rhs should fail. // desc{1,2,3} = (VOTER_FULL, VOTER_INCOMING) (VOTER_FULL) (VOTER_FULL, VOTER_INCOMING) @@ -973,9 +973,9 @@ func TestLearnerAndJointConfigAdminMerge(t *testing.T) { // Turn the incoming voters on desc1 and desc3 into VOTER_DEMOTINGs. 
// desc{1,2,3} = (VOTER_FULL, VOTER_DEMOTING) (VOTER_FULL) (VOTER_FULL, VOTER_DEMOTING) desc1 = tc.RemoveVotersOrFatal(t, desc1.StartKey.AsRawKey(), tc.Target(1)) - require.Len(t, desc1.Replicas().Filter(predDemoting), 1) + require.Len(t, desc1.Replicas().FilterToDescriptors(predDemoting), 1) desc3 = tc.RemoveVotersOrFatal(t, desc3.StartKey.AsRawKey(), tc.Target(1)) - require.Len(t, desc3.Replicas().Filter(predDemoting), 1) + require.Len(t, desc3.Replicas().FilterToDescriptors(predDemoting), 1) // VOTER_DEMOTING on the lhs or rhs should fail. checkFails() @@ -985,14 +985,14 @@ func TestLearnerAndJointConfigAdminMerge(t *testing.T) { // replica sets are equal). // desc{1,2,3} = (VOTER_FULL, VOTER_DEMOTING) (VOTER_FULL, VOTER_INCOMING) (VOTER_FULL, VOTER_DEMOTING) desc2 := tc.AddVotersOrFatal(t, splitKey1, tc.Target(1)) - require.Len(t, desc2.Replicas().Filter(predIncoming), 1) + require.Len(t, desc2.Replicas().FilterToDescriptors(predIncoming), 1) checkFails() // Ditto VOTER_DEMOTING. // desc{1,2,3} = (VOTER_FULL, VOTER_DEMOTING) (VOTER_FULL, VOTER_DEMOTING) (VOTER_FULL, VOTER_DEMOTING) desc2 = tc.RemoveVotersOrFatal(t, desc2.StartKey.AsRawKey(), tc.Target(1)) - require.Len(t, desc2.Replicas().Filter(predDemoting), 1) + require.Len(t, desc2.Replicas().FilterToDescriptors(predDemoting), 1) checkFails() } @@ -1049,8 +1049,8 @@ func TestMergeQueueSeesLearnerOrJointConfig(t *testing.T) { require.Equal(t, origDesc.StartKey, desc.StartKey) require.Equal(t, origDesc.EndKey, desc.EndKey) // The merge removed the learner. - require.Len(t, desc.Replicas().Voters(), 1) - require.Empty(t, desc.Replicas().Learners()) + require.Len(t, desc.Replicas().VoterDescriptors(), 1) + require.Empty(t, desc.Replicas().LearnerDescriptors()) } // Create the RHS again and repeat the same game, except this time the LHS @@ -1062,7 +1062,7 @@ func TestMergeQueueSeesLearnerOrJointConfig(t *testing.T) { ltk.withStopAfterJointConfig(func() { desc = tc.AddVotersOrFatal(t, scratchStartKey, tc.Target(1)) }) - require.Len(t, desc.Replicas().Filter(predIncoming), 1, desc) + require.Len(t, desc.Replicas().FilterToDescriptors(predIncoming), 1, desc) checkTransitioningOut := func() { t.Helper() @@ -1082,7 +1082,7 @@ func TestMergeQueueSeesLearnerOrJointConfig(t *testing.T) { checkTransitioningOut() desc = tc.LookupRangeOrFatal(t, scratchStartKey) - require.Len(t, desc.Replicas().Voters(), 2) + require.Len(t, desc.Replicas().VoterDescriptors(), 2) require.False(t, desc.Replicas().InAtomicReplicationChange(), desc) // Repeat the game, except now we start with two replicas and we're @@ -1090,14 +1090,14 @@ func TestMergeQueueSeesLearnerOrJointConfig(t *testing.T) { desc = splitAndUnsplit() ltk.withStopAfterJointConfig(func() { descRight := tc.RemoveVotersOrFatal(t, desc.EndKey.AsRawKey(), tc.Target(1)) - require.Len(t, descRight.Replicas().Filter(predDemoting), 1, desc) + require.Len(t, descRight.Replicas().FilterToDescriptors(predDemoting), 1, desc) }) // This should transition out (i.e. remove the voter on s2 for the RHS) // and then do its thing, which means in the end we have two voters again. 
checkTransitioningOut() desc = tc.LookupRangeOrFatal(t, scratchStartKey) - require.Len(t, desc.Replicas().Voters(), 2) + require.Len(t, desc.Replicas().VoterDescriptors(), 2) require.False(t, desc.Replicas().InAtomicReplicationChange(), desc) } } diff --git a/pkg/kv/kvserver/replica_metrics.go b/pkg/kv/kvserver/replica_metrics.go index 7b49d85b58f8..9e87dc46ea2b 100644 --- a/pkg/kv/kvserver/replica_metrics.go +++ b/pkg/kv/kvserver/replica_metrics.go @@ -164,7 +164,7 @@ func calcRangeCounter( // It seems unlikely that a learner replica would be the first live one, but // there's no particular reason to exclude them. Note that `All` returns the // voters first. - for _, rd := range desc.Replicas().All() { + for _, rd := range desc.Replicas().Descriptors() { if livenessMap[rd.NodeID].IsLive { rangeCounter = rd.StoreID == storeID break @@ -193,7 +193,7 @@ func calcRangeCounter( // considered. func calcLiveVoterReplicas(desc *roachpb.RangeDescriptor, livenessMap liveness.IsLiveMap) int { var live int - for _, rd := range desc.Replicas().Voters() { + for _, rd := range desc.Replicas().VoterDescriptors() { if livenessMap[rd.NodeID].IsLive { live++ } @@ -207,7 +207,7 @@ func calcBehindCount( raftStatus *raft.Status, desc *roachpb.RangeDescriptor, livenessMap liveness.IsLiveMap, ) int64 { var behindCount int64 - for _, rd := range desc.Replicas().All() { + for _, rd := range desc.Replicas().Descriptors() { if progress, ok := raftStatus.Progress[uint64(rd.ReplicaID)]; ok { if progress.Match > 0 && progress.Match < raftStatus.Commit { diff --git a/pkg/kv/kvserver/replica_metrics_test.go b/pkg/kv/kvserver/replica_metrics_test.go index efaac4936022..42b36a531400 100644 --- a/pkg/kv/kvserver/replica_metrics_test.go +++ b/pkg/kv/kvserver/replica_metrics_test.go @@ -28,7 +28,7 @@ func TestCalcRangeCounterIsLiveMap(t *testing.T) { // https://github.com/cockroachdb/cockroach/pull/39936#pullrequestreview-359059629 desc := roachpb.NewRangeDescriptor(123, roachpb.RKeyMin, roachpb.RKeyMax, - roachpb.MakeReplicaDescriptors([]roachpb.ReplicaDescriptor{ + roachpb.MakeReplicaSet([]roachpb.ReplicaDescriptor{ {NodeID: 10, StoreID: 11, ReplicaID: 12, Type: roachpb.ReplicaTypeVoterFull()}, {NodeID: 100, StoreID: 110, ReplicaID: 120, Type: roachpb.ReplicaTypeVoterFull()}, {NodeID: 1000, StoreID: 1100, ReplicaID: 1200, Type: roachpb.ReplicaTypeVoterFull()}, diff --git a/pkg/kv/kvserver/replica_proposal_quota.go b/pkg/kv/kvserver/replica_proposal_quota.go index fbf63877f9c6..6ff64c06e107 100644 --- a/pkg/kv/kvserver/replica_proposal_quota.go +++ b/pkg/kv/kvserver/replica_proposal_quota.go @@ -103,7 +103,7 @@ func (r *Replica) updateProposalQuotaRaftMuLocked( // hands. r.mu.proposalQuota = quotapool.NewIntPool(r.rangeStr.String(), uint64(r.store.cfg.RaftProposalQuota)) r.mu.lastUpdateTimes = make(map[roachpb.ReplicaID]time.Time) - r.mu.lastUpdateTimes.updateOnBecomeLeader(r.mu.state.Desc.Replicas().All(), timeutil.Now()) + r.mu.lastUpdateTimes.updateOnBecomeLeader(r.mu.state.Desc.Replicas().Descriptors(), timeutil.Now()) } else if r.mu.proposalQuota != nil { // We're becoming a follower. // We unblock all ongoing and subsequent quota acquisition goroutines diff --git a/pkg/kv/kvserver/replica_raft.go b/pkg/kv/kvserver/replica_raft.go index d7255a74ca42..c649fe3e3d79 100644 --- a/pkg/kv/kvserver/replica_raft.go +++ b/pkg/kv/kvserver/replica_raft.go @@ -1805,7 +1805,7 @@ func maybeCampaignAfterConfChange( // If the leader is no longer in the descriptor but we are the first voter, // campaign. 
_, leaderStillThere := desc.GetReplicaDescriptorByID(roachpb.ReplicaID(st.Lead)) - if !leaderStillThere && storeID == desc.Replicas().Voters()[0].StoreID { + if !leaderStillThere && storeID == desc.Replicas().VoterDescriptors()[0].StoreID { log.VEventf(ctx, 3, "leader got removed by conf change; campaigning") _ = raftGroup.Campaign() } diff --git a/pkg/kv/kvserver/replica_raft_quiesce.go b/pkg/kv/kvserver/replica_raft_quiesce.go index 40a4899994c9..0a90f4d8c65b 100644 --- a/pkg/kv/kvserver/replica_raft_quiesce.go +++ b/pkg/kv/kvserver/replica_raft_quiesce.go @@ -69,7 +69,7 @@ func (r *Replica) unquiesceWithOptionsLocked(campaignOnWake bool) { } // NB: we know there's a non-nil RaftStatus because internalRaftGroup isn't nil. r.mu.lastUpdateTimes.updateOnUnquiesce( - r.mu.state.Desc.Replicas().All(), r.raftStatusRLocked().Progress, timeutil.Now(), + r.mu.state.Desc.Replicas().Descriptors(), r.raftStatusRLocked().Progress, timeutil.Now(), ) } } @@ -323,7 +323,7 @@ func shouldReplicaQuiesce( var foundSelf bool var lagging laggingReplicaSet - for _, rep := range q.descRLocked().Replicas().All() { + for _, rep := range q.descRLocked().Replicas().Descriptors() { if uint64(rep.ReplicaID) == status.ID { foundSelf = true } diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index eda36ced8ffc..e3cc48f79398 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -9596,7 +9596,7 @@ func TestShouldReplicaQuiesce(t *testing.T) { if ok { // Any non-live replicas should be in the laggingReplicaSet. var expLagging laggingReplicaSet - for _, rep := range q.descRLocked().Replicas().All() { + for _, rep := range q.descRLocked().Replicas().Descriptors() { if l, ok := q.livenessMap[rep.NodeID]; ok && !l.IsLive { expLagging = append(expLagging, l.Liveness) } @@ -12833,7 +12833,7 @@ func TestPrepareChangeReplicasTrigger(t *testing.T) { }) } } - desc := roachpb.NewRangeDescriptor(roachpb.RangeID(10), roachpb.RKeyMin, roachpb.RKeyMax, roachpb.MakeReplicaDescriptors(rDescs)) + desc := roachpb.NewRangeDescriptor(roachpb.RangeID(10), roachpb.RKeyMin, roachpb.RKeyMax, roachpb.MakeReplicaSet(rDescs)) return testCase{ desc: desc, chgs: chgs, @@ -12934,7 +12934,7 @@ func TestRangeUnavailableMessage(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - var repls roachpb.ReplicaDescriptors + var repls roachpb.ReplicaSet repls.AddReplica(roachpb.ReplicaDescriptor{NodeID: 1, StoreID: 10, ReplicaID: 100}) repls.AddReplica(roachpb.ReplicaDescriptor{NodeID: 2, StoreID: 20, ReplicaID: 200}) desc := roachpb.NewRangeDescriptor(10, roachpb.RKey("a"), roachpb.RKey("z"), repls) diff --git a/pkg/kv/kvserver/replica_write.go b/pkg/kv/kvserver/replica_write.go index 049060f8685a..cf9b9f10a30b 100644 --- a/pkg/kv/kvserver/replica_write.go +++ b/pkg/kv/kvserver/replica_write.go @@ -265,7 +265,7 @@ func (r *Replica) executeWriteBatch( desc := r.Desc() // NB: waitForApplication already has a timeout. 
applicationErr := waitForApplication( - ctx, r.store.cfg.NodeDialer, desc.RangeID, desc.Replicas().All(), + ctx, r.store.cfg.NodeDialer, desc.RangeID, desc.Replicas().Descriptors(), uint64(maxLeaseIndex)) propResult.Err = roachpb.NewError(applicationErr) } @@ -305,7 +305,7 @@ func rangeUnavailableMessage( dur time.Duration, ) { var liveReplicas, otherReplicas []roachpb.ReplicaDescriptor - for _, rDesc := range desc.Replicas().All() { + for _, rDesc := range desc.Replicas().Descriptors() { if lm[rDesc.NodeID].IsLive { liveReplicas = append(liveReplicas, rDesc) } else { @@ -316,7 +316,7 @@ func rangeUnavailableMessage( // Ensure that these are going to redact nicely. var _ redact.SafeFormatter = ba var _ redact.SafeFormatter = desc - var _ redact.SafeFormatter = roachpb.ReplicaDescriptors{} + var _ redact.SafeFormatter = roachpb.ReplicaSet{} s.Printf(`have been waiting %.2fs for proposing command %s. This range is likely unavailable. @@ -337,8 +337,8 @@ support contract. Otherwise, please open an issue at: dur.Seconds(), ba, desc, - roachpb.MakeReplicaDescriptors(liveReplicas), - roachpb.MakeReplicaDescriptors(otherReplicas), + roachpb.MakeReplicaSet(liveReplicas), + roachpb.MakeReplicaSet(otherReplicas), redact.Safe(rs), // raft status contains no PII desc.RangeID, ) diff --git a/pkg/kv/kvserver/replicate_queue.go b/pkg/kv/kvserver/replicate_queue.go index c58de9aed872..2e9052a31e9c 100644 --- a/pkg/kv/kvserver/replicate_queue.go +++ b/pkg/kv/kvserver/replicate_queue.go @@ -222,7 +222,7 @@ func (rq *replicateQueue) shouldQueue( if action == AllocatorRemoveLearner { return true, priority } - voterReplicas := desc.Replicas().Voters() + voterReplicas := desc.Replicas().VoterDescriptors() if action == AllocatorNoop { log.VEventf(ctx, 2, "no action to take") @@ -321,7 +321,7 @@ func (rq *replicateQueue) processOneChange( // Avoid taking action if the range has too many dead replicas to make // quorum. - voterReplicas := desc.Replicas().Voters() + voterReplicas := desc.Replicas().VoterDescriptors() liveVoterReplicas, deadVoterReplicas := rq.allocator.storePool.liveAndDeadReplicas(voterReplicas) // NB: the replication layer ensures that the below operations don't cause @@ -710,7 +710,7 @@ func (rq *replicateQueue) removeDecommissioning( ctx context.Context, repl *Replica, dryRun bool, ) (requeue bool, _ error) { desc, _ := repl.DescAndZone() - decommissioningReplicas := rq.allocator.storePool.decommissioningReplicas(desc.Replicas().All()) + decommissioningReplicas := rq.allocator.storePool.decommissioningReplicas(desc.Replicas().Descriptors()) if len(decommissioningReplicas) == 0 { log.VEventf(ctx, 1, "range of replica %s was identified as having decommissioning replicas, "+ "but no decommissioning replicas were found", repl) @@ -784,7 +784,7 @@ func (rq *replicateQueue) removeLearner( ctx context.Context, repl *Replica, dryRun bool, ) (requeue bool, _ error) { desc := repl.Desc() - learnerReplicas := desc.Replicas().Learners() + learnerReplicas := desc.Replicas().LearnerDescriptors() if len(learnerReplicas) == 0 { log.VEventf(ctx, 1, "range of replica %s was identified as having learner replicas, "+ "but no learner replicas were found", repl) @@ -961,11 +961,11 @@ func (rq *replicateQueue) shedLease( opts transferLeaseOptions, ) (leaseTransferOutcome, error) { // Learner replicas aren't allowed to become the leaseholder or raft leader, - // so only consider the `Voters` replicas. + // so only consider the `VoterDescriptors` replicas. 
target := rq.allocator.TransferLeaseTarget( ctx, zone, - desc.Replicas().Voters(), + desc.Replicas().VoterDescriptors(), repl.store.StoreID(), repl.leaseholderStats, opts.checkTransferLeaseSource, diff --git a/pkg/kv/kvserver/replicate_queue_test.go b/pkg/kv/kvserver/replicate_queue_test.go index d4fc9da8fa54..0bdf609dcd39 100644 --- a/pkg/kv/kvserver/replicate_queue_test.go +++ b/pkg/kv/kvserver/replicate_queue_test.go @@ -85,7 +85,7 @@ func testReplicateQueueRebalanceInner(t *testing.T, atomic bool) { // queue is already hard at work. testutils.SucceedsSoon(t, func() error { desc := tc.LookupRangeOrFatal(t, splitKey) - if i > 0 && len(desc.Replicas().Voters()) > 3 { + if i > 0 && len(desc.Replicas().VoterDescriptors()) > 3 { // Some system ranges have five replicas but user ranges only three, // so we'll see downreplications early in the startup process which // we want to ignore. Delay the splits so that we don't create @@ -146,7 +146,7 @@ func testReplicateQueueRebalanceInner(t *testing.T, atomic bool) { infos, err := queryRangeLog(tc.Conns[0], `SELECT info FROM system.rangelog ORDER BY timestamp DESC`) require.NoError(t, err) for _, info := range infos { - if _, ok := trackedRanges[info.UpdatedDesc.RangeID]; !ok || len(info.UpdatedDesc.Replicas().Voters()) <= 3 { + if _, ok := trackedRanges[info.UpdatedDesc.RangeID]; !ok || len(info.UpdatedDesc.Replicas().VoterDescriptors()) <= 3 { continue } // If we have atomic changes enabled, we expect to never see four replicas @@ -270,7 +270,7 @@ func TestReplicateQueueDownReplicate(t *testing.T) { // starts up with 5 replicas. Since it's not a system range, its default zone // config asks for 3x replication, and the replication queue will // down-replicate it. - require.Len(t, desc.Replicas().All(), 5) + require.Len(t, desc.Replicas().Descriptors(), 5) // Re-enable the replication queue. tc.ToggleReplicateQueues(true) diff --git a/pkg/kv/kvserver/reports/constraint_stats_report_test.go b/pkg/kv/kvserver/reports/constraint_stats_report_test.go index 6c45cd351a99..dd13a63f7315 100644 --- a/pkg/kv/kvserver/reports/constraint_stats_report_test.go +++ b/pkg/kv/kvserver/reports/constraint_stats_report_test.go @@ -809,8 +809,8 @@ func compileTestCase(tc baseReportTestCase) (compiledTestCase, error) { storeDescs = append(storeDescs, sds...) } storeResolver := func(r *roachpb.RangeDescriptor) []roachpb.StoreDescriptor { - stores := make([]roachpb.StoreDescriptor, len(r.Replicas().Voters())) - for i, rep := range r.Replicas().Voters() { + stores := make([]roachpb.StoreDescriptor, len(r.Replicas().VoterDescriptors())) + for i, rep := range r.Replicas().VoterDescriptors() { for _, desc := range storeDescs { if rep.StoreID == desc.StoreID { stores[i] = desc diff --git a/pkg/kv/kvserver/reports/critical_localities_report.go b/pkg/kv/kvserver/reports/critical_localities_report.go index 41d1c90afa51..f64bb15d7983 100644 --- a/pkg/kv/kvserver/reports/critical_localities_report.go +++ b/pkg/kv/kvserver/reports/critical_localities_report.go @@ -375,7 +375,7 @@ func (v *criticalLocalitiesVisitor) countRange( // "region:us-east,dc=new-york", we collect both "region:us-east" and // "region:us-east,dc=new-york". dedupLocal := make(map[string]roachpb.Locality) - for _, rep := range r.Replicas().All() { + for _, rep := range r.Replicas().Descriptors() { for s, loc := range v.allLocalities[rep.NodeID] { if _, ok := dedupLocal[s]; ok { continue @@ -405,7 +405,7 @@ func processLocalityForRange( // Compute the required quorum and the number of live nodes. 
If the number of // live nodes gets lower than the required quorum then the range is already // unavailable. - quorumCount := len(r.Replicas().Voters())/2 + 1 + quorumCount := len(r.Replicas().VoterDescriptors())/2 + 1 liveNodeCount := len(storeDescs) for _, storeDesc := range storeDescs { isStoreLive := nodeChecker(storeDesc.Node.NodeID) diff --git a/pkg/kv/kvserver/reports/replication_stats_report.go b/pkg/kv/kvserver/reports/replication_stats_report.go index 29bff152e5c4..8124294d5389 100644 --- a/pkg/kv/kvserver/reports/replication_stats_report.go +++ b/pkg/kv/kvserver/reports/replication_stats_report.go @@ -388,9 +388,9 @@ func (v *replicationStatsVisitor) visitSameZone(ctx context.Context, r *roachpb. func (v *replicationStatsVisitor) countRange( key ZoneKey, replicationFactor int, r *roachpb.RangeDescriptor, ) { - voters := len(r.Replicas().Voters()) + voters := len(r.Replicas().VoterDescriptors()) var liveVoters int - for _, rep := range r.Replicas().Voters() { + for _, rep := range r.Replicas().VoterDescriptors() { if v.nodeChecker(rep.NodeID) { liveVoters++ } diff --git a/pkg/kv/kvserver/reports/reporter.go b/pkg/kv/kvserver/reports/reporter.go index 717b2caef2c8..78a1bd0eb0b0 100644 --- a/pkg/kv/kvserver/reports/reporter.go +++ b/pkg/kv/kvserver/reports/reporter.go @@ -182,13 +182,13 @@ func (stats *Reporter) update( var getStoresFromGossip StoreResolver = func( r *roachpb.RangeDescriptor, ) []roachpb.StoreDescriptor { - storeDescs := make([]roachpb.StoreDescriptor, len(r.Replicas().Voters())) + storeDescs := make([]roachpb.StoreDescriptor, len(r.Replicas().VoterDescriptors())) // We'll return empty descriptors for stores that gossip doesn't have a // descriptor for. These stores will be considered to satisfy all // constraints. // TODO(andrei): note down that some descriptors were missing from gossip // somewhere in the report. 
- for i, repl := range r.Replicas().Voters() { + for i, repl := range r.Replicas().VoterDescriptors() { storeDescs[i] = allStores[repl.StoreID] } return storeDescs diff --git a/pkg/kv/kvserver/reset_quorum_test.go b/pkg/kv/kvserver/reset_quorum_test.go index 673cb3f3dc68..158147d9309c 100644 --- a/pkg/kv/kvserver/reset_quorum_test.go +++ b/pkg/kv/kvserver/reset_quorum_test.go @@ -87,7 +87,7 @@ func TestResetQuorum(t *testing.T) { require.NoError(t, tc.TransferRangeLease(desc, tc.Target(n2))) desc, err = tc.RemoveVoters(k, tc.Target(n1)) require.NoError(t, err) - require.Len(t, desc.Replicas().All(), 3) + require.Len(t, desc.Replicas().Descriptors(), 3) srv := tc.Server(n1) @@ -153,11 +153,11 @@ func TestResetQuorum(t *testing.T) { } return errors.Errorf("range id %v not found after resetting quorum", rangeID) })) - if len(updatedDesc.Replicas().All()) != 1 { - t.Fatalf("found %v replicas found after resetting quorum, expected 1", len(updatedDesc.Replicas().All())) + if len(updatedDesc.Replicas().Descriptors()) != 1 { + t.Fatalf("found %v replicas found after resetting quorum, expected 1", len(updatedDesc.Replicas().Descriptors())) } - if updatedDesc.Replicas().All()[0].NodeID != srv.NodeID() { - t.Fatalf("replica found after resetting quorum is on node id %v, expected node id %v", updatedDesc.Replicas().All()[0].NodeID, srv.NodeID()) + if updatedDesc.Replicas().Descriptors()[0].NodeID != srv.NodeID() { + t.Fatalf("replica found after resetting quorum is on node id %v, expected node id %v", updatedDesc.Replicas().Descriptors()[0].NodeID, srv.NodeID()) } } diff --git a/pkg/kv/kvserver/split_delay_helper.go b/pkg/kv/kvserver/split_delay_helper.go index c68cdfee29f3..ba4c9cbb9c25 100644 --- a/pkg/kv/kvserver/split_delay_helper.go +++ b/pkg/kv/kvserver/split_delay_helper.go @@ -36,7 +36,7 @@ func (sdh *splitDelayHelper) RaftStatus(ctx context.Context) (roachpb.RangeID, * raftStatus := r.raftStatusRLocked() if raftStatus != nil { updateRaftProgressFromActivity( - ctx, raftStatus.Progress, r.descRLocked().Replicas().All(), + ctx, raftStatus.Progress, r.descRLocked().Replicas().Descriptors(), func(replicaID roachpb.ReplicaID) bool { return r.mu.lastUpdateTimes.isFollowerActiveSince( ctx, replicaID, timeutil.Now(), r.store.cfg.RangeLeaseActiveDuration()) diff --git a/pkg/kv/kvserver/split_queue_test.go b/pkg/kv/kvserver/split_queue_test.go index 5bc8d716708f..02386650723e 100644 --- a/pkg/kv/kvserver/split_queue_test.go +++ b/pkg/kv/kvserver/split_queue_test.go @@ -81,7 +81,7 @@ func TestSplitQueueShouldQueue(t *testing.T) { cpy := *tc.repl.Desc() cpy.StartKey = test.start cpy.EndKey = test.end - repl, err := newReplica(ctx, &cpy, tc.store, cpy.Replicas().Voters()[0].ReplicaID) + repl, err := newReplica(ctx, &cpy, tc.store, cpy.Replicas().VoterDescriptors()[0].ReplicaID) if err != nil { t.Fatal(err) } diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index ab1bfabf3680..547a113df938 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -1119,7 +1119,7 @@ func (s *Store) SetDraining(drain bool, reporter func(int, redact.SafeString)) { // Learner replicas aren't allowed to become the leaseholder or raft // leader, so only consider the `Voters` replicas. 
- needsLeaseTransfer := len(r.Desc().Replicas().Voters()) > 1 && + needsLeaseTransfer := len(r.Desc().Replicas().VoterDescriptors()) > 1 && drainingLease.OwnedBy(s.StoreID()) && r.IsLeaseValid(ctx, drainingLease, s.Clock().Now()) diff --git a/pkg/kv/kvserver/store_init.go b/pkg/kv/kvserver/store_init.go index 1580744e3a58..8111a7052e93 100644 --- a/pkg/kv/kvserver/store_init.go +++ b/pkg/kv/kvserver/store_init.go @@ -170,7 +170,7 @@ func WriteInitialClusterData( ReplicaID: 1, }, } - desc.SetReplicas(roachpb.MakeReplicaDescriptors(replicas)) + desc.SetReplicas(roachpb.MakeReplicaSet(replicas)) if err := desc.Validate(); err != nil { return err } diff --git a/pkg/kv/kvserver/store_rebalancer.go b/pkg/kv/kvserver/store_rebalancer.go index dd7f7f7cf2b6..fcd63c695f1f 100644 --- a/pkg/kv/kvserver/store_rebalancer.go +++ b/pkg/kv/kvserver/store_rebalancer.go @@ -326,7 +326,7 @@ func (sr *StoreRebalancer) rebalanceStore( // TODO(a-robinson): This just updates the copies used locally by the // storeRebalancer. We may also want to update the copies in the StorePool // itself. - replicasBeforeRebalance := descBeforeRebalance.Replicas().All() + replicasBeforeRebalance := descBeforeRebalance.Replicas().Descriptors() for i := range replicasBeforeRebalance { if storeDesc := storeMap[replicasBeforeRebalance[i].StoreID]; storeDesc != nil { storeDesc.Capacity.RangeCount-- @@ -398,7 +398,7 @@ func (sr *StoreRebalancer) chooseLeaseToTransfer( // Check all the other replicas in order of increasing qps. Learner replicas // aren't allowed to become the leaseholder or raft leader, so only consider // the `Voters` replicas. - candidates := desc.Replicas().DeepCopy().Voters() + candidates := desc.Replicas().DeepCopy().VoterDescriptors() sort.Slice(candidates, func(i, j int) bool { var iQPS, jQPS float64 if desc := storeMap[candidates[i].StoreID]; desc != nil { @@ -506,7 +506,7 @@ func (sr *StoreRebalancer) chooseReplicaToRebalance( desiredReplicas := GetNeededReplicas(*zone.NumReplicas, clusterNodes) targets := make([]roachpb.ReplicationTarget, 0, desiredReplicas) targetReplicas := make([]roachpb.ReplicaDescriptor, 0, desiredReplicas) - currentReplicas := desc.Replicas().All() + currentReplicas := desc.Replicas().Descriptors() // Check the range's existing diversity score, since we want to ensure we // don't hurt locality diversity just to improve QPS. diff --git a/pkg/kv/kvserver/store_split.go b/pkg/kv/kvserver/store_split.go index 038f2246a976..17bf196f0348 100644 --- a/pkg/kv/kvserver/store_split.go +++ b/pkg/kv/kvserver/store_split.go @@ -199,7 +199,7 @@ func splitPostApply( if rightReplOrNil != nil { r.store.splitQueue.MaybeAddAsync(ctx, rightReplOrNil, now) r.store.replicateQueue.MaybeAddAsync(ctx, rightReplOrNil, now) - if len(split.RightDesc.Replicas().All()) == 1 { + if len(split.RightDesc.Replicas().Descriptors()) == 1 { // TODO(peter): In single-node clusters, we enqueue the right-hand side of // the split (the new range) for Raft processing so that the corresponding // Raft group is created. This shouldn't be necessary for correctness, but diff --git a/pkg/roachpb/data.go b/pkg/roachpb/data.go index 32a71e4f2230..6ed133467bd1 100644 --- a/pkg/roachpb/data.go +++ b/pkg/roachpb/data.go @@ -1496,7 +1496,7 @@ func writeTooOldRetryTimestamp(err *WriteTooOldError) hlc.Timestamp { // trigger applies. 
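A minimal sketch of how the chooseLeaseToTransfer hunk above picks lease-transfer candidates, assuming a hypothetical qps map keyed by StoreID: the set is deep-copied before sorting because the voter accessor may return the set's underlying slice, and learners are excluded because they cannot hold the lease.

package main

import (
	"fmt"
	"sort"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

func main() {
	desc := roachpb.NewRangeDescriptor(
		1, roachpb.RKey("a"), roachpb.RKey("z"),
		roachpb.MakeReplicaSet([]roachpb.ReplicaDescriptor{
			{NodeID: 1, StoreID: 1},
			{NodeID: 2, StoreID: 2},
			{NodeID: 3, StoreID: 3},
		}),
	)

	// Hypothetical per-store QPS figures; the real rebalancer reads these from
	// store descriptors.
	qps := map[roachpb.StoreID]float64{1: 900, 2: 150, 3: 400}

	// DeepCopy before sorting so that reordering cannot disturb the
	// descriptor's own (possibly shared) replica storage.
	candidates := desc.Replicas().DeepCopy().VoterDescriptors()
	sort.Slice(candidates, func(i, j int) bool {
		return qps[candidates[i].StoreID] < qps[candidates[j].StoreID]
	})
	fmt.Println("least loaded voter:", candidates[0])
}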
func (crt ChangeReplicasTrigger) Replicas() []ReplicaDescriptor { if crt.Desc != nil { - return crt.Desc.Replicas().All() + return crt.Desc.Replicas().Descriptors() } return crt.DeprecatedUpdatedReplicas } diff --git a/pkg/roachpb/data_test.go b/pkg/roachpb/data_test.go index 3dfffeeab5c8..b5e1a88659da 100644 --- a/pkg/roachpb/data_test.go +++ b/pkg/roachpb/data_test.go @@ -1732,7 +1732,7 @@ func TestChangeReplicasTrigger_String(t *testing.T) { crt.InternalRemovedReplicas = nil crt.InternalAddedReplicas = nil repl1.Type = ReplicaTypeVoterFull() - crt.Desc.SetReplicas(MakeReplicaDescriptors([]ReplicaDescriptor{repl1, learner})) + crt.Desc.SetReplicas(MakeReplicaSet([]ReplicaDescriptor{repl1, learner})) act = crt.String() require.Empty(t, crt.Added()) require.Empty(t, crt.Removed()) @@ -1781,7 +1781,7 @@ func TestChangeReplicasTrigger_ConfChange(t *testing.T) { m.ChangeReplicasTrigger.InternalAddedReplicas = in.add m.ChangeReplicasTrigger.InternalRemovedReplicas = in.del m.Desc = &RangeDescriptor{} - m.Desc.SetReplicas(MakeReplicaDescriptors(in.repls)) + m.Desc.SetReplicas(MakeReplicaSet(in.repls)) return m } diff --git a/pkg/roachpb/metadata.go b/pkg/roachpb/metadata.go index d77bb24cdc56..47d4b2bc2738 100644 --- a/pkg/roachpb/metadata.go +++ b/pkg/roachpb/metadata.go @@ -129,10 +129,8 @@ func (g RangeGeneration) String() string { func (g RangeGeneration) SafeValue() {} // NewRangeDescriptor returns a RangeDescriptor populated from the input. -func NewRangeDescriptor( - rangeID RangeID, start, end RKey, replicas ReplicaDescriptors, -) *RangeDescriptor { - repls := append([]ReplicaDescriptor(nil), replicas.All()...) +func NewRangeDescriptor(rangeID RangeID, start, end RKey, replicas ReplicaSet) *RangeDescriptor { + repls := append([]ReplicaDescriptor(nil), replicas.Descriptors()...) for i := range repls { repls[i].ReplicaID = ReplicaID(i + 1) } @@ -142,7 +140,7 @@ func NewRangeDescriptor( EndKey: end, NextReplicaID: ReplicaID(len(repls) + 1), } - desc.SetReplicas(MakeReplicaDescriptors(repls)) + desc.SetReplicas(MakeReplicaSet(repls)) return desc } @@ -209,13 +207,13 @@ func (r *RangeDescriptor) ContainsKeyRange(start, end RKey) bool { // Replicas returns the set of nodes/stores on which replicas of this range are // stored. -func (r *RangeDescriptor) Replicas() ReplicaDescriptors { - return MakeReplicaDescriptors(r.InternalReplicas) +func (r *RangeDescriptor) Replicas() ReplicaSet { + return MakeReplicaSet(r.InternalReplicas) } // SetReplicas overwrites the set of nodes/stores on which replicas of this // range are stored. -func (r *RangeDescriptor) SetReplicas(replicas ReplicaDescriptors) { +func (r *RangeDescriptor) SetReplicas(replicas ReplicaSet) { r.InternalReplicas = replicas.AsProto() } @@ -278,7 +276,7 @@ func (r *RangeDescriptor) RemoveReplica(nodeID NodeID, storeID StoreID) (Replica // GetReplicaDescriptor returns the replica which matches the specified store // ID. func (r *RangeDescriptor) GetReplicaDescriptor(storeID StoreID) (ReplicaDescriptor, bool) { - for _, repDesc := range r.Replicas().All() { + for _, repDesc := range r.Replicas().Descriptors() { if repDesc.StoreID == storeID { return repDesc, true } @@ -289,7 +287,7 @@ func (r *RangeDescriptor) GetReplicaDescriptor(storeID StoreID) (ReplicaDescript // GetReplicaDescriptorByID returns the replica which matches the specified store // ID. 
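A small sketch of the renamed construction path in pkg/roachpb, using only APIs that appear in this patch (MakeReplicaSet, NewRangeDescriptor, Validate, and the descriptor lookups); the IDs and keys are made up.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

func main() {
	replicas := []roachpb.ReplicaDescriptor{
		{NodeID: 1, StoreID: 1},
		{NodeID: 2, StoreID: 2},
		{NodeID: 3, StoreID: 3},
	}
	// NewRangeDescriptor copies the set and assigns ReplicaIDs 1..n plus
	// NextReplicaID, as in the hunk above.
	desc := roachpb.NewRangeDescriptor(
		1, roachpb.RKey("a"), roachpb.RKey("z"), roachpb.MakeReplicaSet(replicas))
	if err := desc.Validate(); err != nil {
		panic(err)
	}

	if rd, ok := desc.GetReplicaDescriptor(2); ok {
		fmt.Println("replica on store 2:", rd)
	}
	if rd, ok := desc.GetReplicaDescriptorByID(3); ok {
		fmt.Println("replica id 3:", rd)
	}
	fmt.Println("all replicas:", desc.Replicas().Descriptors())
}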
func (r *RangeDescriptor) GetReplicaDescriptorByID(replicaID ReplicaID) (ReplicaDescriptor, bool) { - for _, repDesc := range r.Replicas().All() { + for _, repDesc := range r.Replicas().Descriptors() { if repDesc.ReplicaID == replicaID { return repDesc, true } @@ -325,7 +323,7 @@ func (r *RangeDescriptor) Validate() error { } seen := map[ReplicaID]struct{}{} stores := map[StoreID]struct{}{} - for i, rep := range r.Replicas().All() { + for i, rep := range r.Replicas().Descriptors() { if err := rep.Validate(); err != nil { return errors.Errorf("replica %d is invalid: %s", i, err) } @@ -361,7 +359,7 @@ func (r RangeDescriptor) SafeFormat(w redact.SafePrinter, _ rune) { } w.SafeString(" [") - if allReplicas := r.Replicas().All(); len(allReplicas) > 0 { + if allReplicas := r.Replicas().Descriptors(); len(allReplicas) > 0 { for i, rep := range allReplicas { if i > 0 { w.SafeString(", ") diff --git a/pkg/roachpb/metadata_replicas.go b/pkg/roachpb/metadata_replicas.go index cf52d2d54227..b08fb078f4bc 100644 --- a/pkg/roachpb/metadata_replicas.go +++ b/pkg/roachpb/metadata_replicas.go @@ -52,31 +52,20 @@ func ReplicaTypeLearner() *ReplicaType { return &t } -// ReplicaDescriptors is a set of replicas, usually the nodes/stores on which +// ReplicaSet is a set of replicas, usually the nodes/stores on which // replicas of a range are stored. -type ReplicaDescriptors struct { +type ReplicaSet struct { wrapped []ReplicaDescriptor } -// MakeReplicaDescriptors creates a ReplicaDescriptors wrapper from a raw slice -// of individual descriptors. -// -// All construction of ReplicaDescriptors is required to go through this method -// so we can guarantee sortedness, which is used to speed up accessor -// operations. -// -// The function accepts a pointer to a slice instead of a slice directly to -// avoid an allocation when boxing the argument as a sort.Interface. This may -// cause the argument to escape to the heap for some callers, at which point -// we're trading one allocation for another. However, if the caller already has -// the slice header on the heap (which is the common case for *RangeDescriptors) -// then this is a net win. -func MakeReplicaDescriptors(replicas []ReplicaDescriptor) ReplicaDescriptors { - return ReplicaDescriptors{wrapped: replicas} +// MakeReplicaSet creates a ReplicaSet wrapper from a raw slice of individual +// descriptors. +func MakeReplicaSet(replicas []ReplicaDescriptor) ReplicaSet { + return ReplicaSet{wrapped: replicas} } // SafeFormat implements redact.SafeFormatter. -func (d ReplicaDescriptors) SafeFormat(w redact.SafePrinter, _ rune) { +func (d ReplicaSet) SafeFormat(w redact.SafePrinter, _ rune) { for i, desc := range d.wrapped { if i > 0 { w.SafeRune(',') @@ -85,13 +74,14 @@ func (d ReplicaDescriptors) SafeFormat(w redact.SafePrinter, _ rune) { } } -func (d ReplicaDescriptors) String() string { +func (d ReplicaSet) String() string { return redact.StringWithoutMarkers(d) } -// All returns every replica in the set, including both voter replicas and -// learner replicas. Voter replicas are ordered first in the returned slice. -func (d ReplicaDescriptors) All() []ReplicaDescriptor { +// Descriptors returns every replica descriptor in the set, including both voter +// replicas and learner replicas. Voter replicas are ordered first in the +// returned slice. 
+func (d ReplicaSet) Descriptors() []ReplicaDescriptor { return d.wrapped } @@ -112,12 +102,12 @@ func predNonVoter(rDesc ReplicaDescriptor) bool { return rDesc.GetType() == NON_VOTER } -// Voters returns the current and future voter replicas in the set. This means -// that during an atomic replication change, only the replicas that will be -// voters once the change completes will be returned; "outgoing" voters will not -// be returned even though they do in the current state retain their voting -// rights. When no atomic membership change is ongoing, this is simply the set -// of all non-learners. +// VoterDescriptors returns the descriptors of current and future voter replicas +// in the set. This means that during an atomic replication change, only the +// replicas that will be voters once the change completes will be returned; +// "outgoing" voters will not be returned even though they do in the current +// state retain their voting rights. When no atomic membership change is +// ongoing, this is simply the set of all non-learners. // // This may allocate, but it also may return the underlying slice as a // performance optimization, so it's not safe to modify the returned value. @@ -125,13 +115,13 @@ func predNonVoter(rDesc ReplicaDescriptor) bool { // TODO(tbg): go through the callers and figure out the few which want a // different subset of voters. Consider renaming this method so that it's // more descriptive. -func (d ReplicaDescriptors) Voters() []ReplicaDescriptor { - return d.Filter(predVoterFullOrIncoming) +func (d ReplicaSet) VoterDescriptors() []ReplicaDescriptor { + return d.FilterToDescriptors(predVoterFullOrIncoming) } -// Learners returns the learner replicas in the set. This may allocate, but it -// also may return the underlying slice as a performance optimization, so it's -// not safe to modify the returned value. +// LearnerDescriptors returns the learner replicas in the set. This may +// allocate, but it also may return the underlying slice as a performance +// optimization, so it's not safe to modify the returned value. // // A learner is a participant in a raft group that accepts messages but doesn't // vote. This means it doesn't affect raft quorum and thus doesn't affect the @@ -211,17 +201,17 @@ func (d ReplicaDescriptors) Voters() []ReplicaDescriptor { // However, it means a slow learner can slow down regular traffic. // // For some related mega-comments, see Replica.sendSnapshot. -func (d ReplicaDescriptors) Learners() []ReplicaDescriptor { - return d.Filter(predLearner) +func (d ReplicaSet) LearnerDescriptors() []ReplicaDescriptor { + return d.FilterToDescriptors(predLearner) } -// NonVoters returns the non-voting replicas in the set. Non-voting replicas are -// treated differently from learner replicas. Learners are a temporary internal -// state used to make atomic replication changes less disruptive to the system. -// Even though learners and non-voting replicas are both etcd/raft LearnerNodes -// under the hood, non-voting replicas are meant to be a user-visible state and -// are explicitly chosen to be placed inside certain localities via zone -// configs. +// NonVoterDescriptors returns the non-voting replica descriptors in the set. +// Non-voting replicas are treated differently from learner replicas. +// Learners are a temporary internal state used to make atomic +// replication changes less disruptive to the system. 
Even though learners and +// non-voting replicas are both etcd/raft LearnerNodes under the hood, +// non-voting replicas are meant to be a user-visible state and are explicitly +// chosen to be placed inside certain localities via zone configs. // // Key differences between how we treat (ephemeral) learners and (persistent) // non-voting replicas: - Non-voting replicas rely on the raft snapshot queue in @@ -238,13 +228,15 @@ func (d ReplicaDescriptors) Learners() []ReplicaDescriptor { // TODO(aayush): Expand this documentation once `AdminRelocateRange` knows how // to deal with such replicas & range merges no longer block due to the presence // of non-voting replicas. -func (d ReplicaDescriptors) NonVoters() []ReplicaDescriptor { - return d.Filter(predNonVoter) +func (d ReplicaSet) NonVoterDescriptors() []ReplicaDescriptor { + return d.FilterToDescriptors(predNonVoter) } -// Filter returns only the replica descriptors for which the supplied method +// FilterToDescriptors returns only the replica descriptors for which the supplied method // returns true. The memory returned may be shared with the receiver. -func (d ReplicaDescriptors) Filter(pred func(rDesc ReplicaDescriptor) bool) []ReplicaDescriptor { +func (d ReplicaSet) FilterToDescriptors( + pred func(rDesc ReplicaDescriptor) bool, +) []ReplicaDescriptor { // Fast path when all or none match to avoid allocations. fastpath := true out := d.wrapped @@ -268,28 +260,26 @@ func (d ReplicaDescriptors) Filter(pred func(rDesc ReplicaDescriptor) bool) []Re // setting the InternalReplicas field of a RangeDescriptor. When possible the // SetReplicas method of RangeDescriptor should be used instead, this is only // here for the convenience of tests. -func (d ReplicaDescriptors) AsProto() []ReplicaDescriptor { +func (d ReplicaSet) AsProto() []ReplicaDescriptor { return d.wrapped } // DeepCopy returns a copy of this set of replicas. Modifications to the // returned set will not affect this one and vice-versa. -func (d ReplicaDescriptors) DeepCopy() ReplicaDescriptors { - return ReplicaDescriptors{ +func (d ReplicaSet) DeepCopy() ReplicaSet { + return ReplicaSet{ wrapped: append([]ReplicaDescriptor(nil), d.wrapped...), } } // AddReplica adds the given replica to this set. -func (d *ReplicaDescriptors) AddReplica(r ReplicaDescriptor) { +func (d *ReplicaSet) AddReplica(r ReplicaDescriptor) { d.wrapped = append(d.wrapped, r) } // RemoveReplica removes the matching replica from this set. If it wasn't found // to remove, false is returned. -func (d *ReplicaDescriptors) RemoveReplica( - nodeID NodeID, storeID StoreID, -) (ReplicaDescriptor, bool) { +func (d *ReplicaSet) RemoveReplica(nodeID NodeID, storeID StoreID) (ReplicaDescriptor, bool) { idx := -1 for i := range d.wrapped { if d.wrapped[i].NodeID == nodeID && d.wrapped[i].StoreID == storeID { @@ -309,7 +299,7 @@ func (d *ReplicaDescriptors) RemoveReplica( // InAtomicReplicationChange returns true if the descriptor is in the middle of // an atomic replication change. -func (d ReplicaDescriptors) InAtomicReplicationChange() bool { +func (d ReplicaSet) InAtomicReplicationChange() bool { for _, rDesc := range d.wrapped { switch rDesc.GetType() { case VOTER_INCOMING, VOTER_OUTGOING, VOTER_DEMOTING: @@ -323,7 +313,7 @@ func (d ReplicaDescriptors) InAtomicReplicationChange() bool { } // ConfState returns the Raft configuration described by the set of replicas. 
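The renamed accessors are easiest to read side by side. Below is a sketch, with made-up IDs and a hypothetical typ helper, of what Descriptors, VoterDescriptors, LearnerDescriptors, NonVoterDescriptors, FilterToDescriptors, and InAtomicReplicationChange return for a set containing one replica of each type.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

func main() {
	// Type is a *ReplicaType, so take the address of a local copy of the enum
	// value via a small helper.
	typ := func(t roachpb.ReplicaType) *roachpb.ReplicaType { return &t }
	rs := roachpb.MakeReplicaSet([]roachpb.ReplicaDescriptor{
		{NodeID: 1, StoreID: 1, ReplicaID: 1, Type: typ(roachpb.VOTER_FULL)},
		{NodeID: 2, StoreID: 2, ReplicaID: 2, Type: typ(roachpb.VOTER_INCOMING)},
		{NodeID: 3, StoreID: 3, ReplicaID: 3, Type: typ(roachpb.VOTER_OUTGOING)},
		{NodeID: 4, StoreID: 4, ReplicaID: 4, Type: typ(roachpb.LEARNER)},
		{NodeID: 5, StoreID: 5, ReplicaID: 5, Type: typ(roachpb.NON_VOTER)},
	})

	fmt.Println(len(rs.Descriptors()))          // 5: every replica, regardless of type
	fmt.Println(len(rs.VoterDescriptors()))     // 2: VOTER_FULL and VOTER_INCOMING only
	fmt.Println(len(rs.LearnerDescriptors()))   // 1: the LEARNER
	fmt.Println(len(rs.NonVoterDescriptors()))  // 1: the NON_VOTER
	fmt.Println(rs.InAtomicReplicationChange()) // true, due to the INCOMING/OUTGOING pair

	// FilterToDescriptors takes an arbitrary predicate; here, replicas on node 2.
	onNode2 := rs.FilterToDescriptors(func(rd roachpb.ReplicaDescriptor) bool {
		return rd.NodeID == 2
	})
	fmt.Println(onNode2)
}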
-func (d ReplicaDescriptors) ConfState() raftpb.ConfState { +func (d ReplicaSet) ConfState() raftpb.ConfState { var cs raftpb.ConfState joint := d.InAtomicReplicationChange() // The incoming config is taken verbatim from the full voters when the @@ -359,7 +349,7 @@ func (d ReplicaDescriptors) ConfState() raftpb.ConfState { // CanMakeProgress reports whether the given descriptors can make progress at the // replication layer. This is more complicated than just counting the number // of replicas due to the existence of joint quorums. -func (d ReplicaDescriptors) CanMakeProgress(liveFunc func(descriptor ReplicaDescriptor) bool) bool { +func (d ReplicaSet) CanMakeProgress(liveFunc func(descriptor ReplicaDescriptor) bool) bool { isVoterOldConfig := func(rDesc ReplicaDescriptor) bool { switch rDesc.GetType() { case VOTER_FULL, VOTER_OUTGOING, VOTER_DEMOTING: @@ -385,8 +375,8 @@ func (d ReplicaDescriptors) CanMakeProgress(liveFunc func(descriptor ReplicaDesc } } - votersOldGroup := d.Filter(isVoterOldConfig) - liveVotersOldGroup := d.Filter(isBoth(isVoterOldConfig, liveFunc)) + votersOldGroup := d.FilterToDescriptors(isVoterOldConfig) + liveVotersOldGroup := d.FilterToDescriptors(isBoth(isVoterOldConfig, liveFunc)) n := len(votersOldGroup) // Empty groups succeed by default, to match the Raft implementation. @@ -394,8 +384,8 @@ func (d ReplicaDescriptors) CanMakeProgress(liveFunc func(descriptor ReplicaDesc return false } - votersNewGroup := d.Filter(isVoterNewConfig) - liveVotersNewGroup := d.Filter(isBoth(isVoterNewConfig, liveFunc)) + votersNewGroup := d.FilterToDescriptors(isVoterNewConfig) + liveVotersNewGroup := d.FilterToDescriptors(isBoth(isVoterNewConfig, liveFunc)) n = len(votersNewGroup) return len(liveVotersNewGroup) >= n/2+1 diff --git a/pkg/roachpb/metadata_replicas_test.go b/pkg/roachpb/metadata_replicas_test.go index 9477016747da..8fac0c4dd3ff 100644 --- a/pkg/roachpb/metadata_replicas_test.go +++ b/pkg/roachpb/metadata_replicas_test.go @@ -61,9 +61,9 @@ func TestVotersLearnersAll(t *testing.T) { } for _, test := range tests { t.Run("", func(t *testing.T) { - r := MakeReplicaDescriptors(test) + r := MakeReplicaSet(test) seen := map[ReplicaDescriptor]struct{}{} - for _, voter := range r.Voters() { + for _, voter := range r.VoterDescriptors() { typ := voter.GetType() switch typ { case VOTER_FULL, VOTER_INCOMING: @@ -72,14 +72,14 @@ func TestVotersLearnersAll(t *testing.T) { assert.FailNow(t, "unexpectedly got a %s as Voter()", typ) } } - for _, learner := range r.Learners() { + for _, learner := range r.LearnerDescriptors() { seen[learner] = struct{}{} assert.Equal(t, LEARNER, learner.GetType()) } - all := r.All() + all := r.Descriptors() // Make sure that VOTER_OUTGOING is the only type that is skipped both - // by Learners() and Voters() + // by LearnerDescriptors() and VoterDescriptors() for _, rd := range all { typ := rd.GetType() if _, seen := seen[rd]; !seen { @@ -126,21 +126,21 @@ func TestReplicaDescriptorsRemove(t *testing.T) { }, } for i, test := range tests { - r := MakeReplicaDescriptors(test.replicas) - lenBefore := len(r.All()) + r := MakeReplicaSet(test.replicas) + lenBefore := len(r.Descriptors()) removedDesc, ok := r.RemoveReplica(test.remove.NodeID, test.remove.StoreID) assert.Equal(t, test.expected, ok, "testcase %d", i) if ok { assert.Equal(t, test.remove.NodeID, removedDesc.NodeID, "testcase %d", i) assert.Equal(t, test.remove.StoreID, removedDesc.StoreID, "testcase %d", i) - assert.Equal(t, lenBefore-1, len(r.All()), "testcase %d", i) + assert.Equal(t, 
lenBefore-1, len(r.Descriptors()), "testcase %d", i) } else { - assert.Equal(t, lenBefore, len(r.All()), "testcase %d", i) + assert.Equal(t, lenBefore, len(r.Descriptors()), "testcase %d", i) } - for _, voter := range r.Voters() { + for _, voter := range r.VoterDescriptors() { assert.Equal(t, VOTER_FULL, voter.GetType(), "testcase %d", i) } - for _, learner := range r.Learners() { + for _, learner := range r.LearnerDescriptors() { assert.Equal(t, LEARNER, learner.GetType(), "testcase %d", i) } } @@ -202,7 +202,7 @@ func TestReplicaDescriptorsConfState(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - r := MakeReplicaDescriptors(test.in) + r := MakeReplicaSet(test.in) cs := r.ConfState() require.Equal(t, test.out, raft.DescribeConfState(cs)) }) @@ -308,7 +308,7 @@ func TestReplicaDescriptorsCanMakeProgress(t *testing.T) { rds = append(rds, rDesc.ReplicaDescriptor) } - act := MakeReplicaDescriptors(rds).CanMakeProgress(func(rd ReplicaDescriptor) bool { + act := MakeReplicaSet(rds).CanMakeProgress(func(rd ReplicaDescriptor) bool { for _, rdi := range test.rds { if rdi.ReplicaID == rd.ReplicaID { return rdi.live @@ -346,7 +346,7 @@ func TestReplicaDescriptorsCanMakeProgressRandom(t *testing.T) { liveness[i] = (livenessBits >> i & 1) == 0 } - rng := MakeReplicaDescriptors(rds) + rng := MakeReplicaSet(rds) crdbCanMakeProgress := rng.CanMakeProgress(func(rd ReplicaDescriptor) bool { return liveness[rd.ReplicaID-1] diff --git a/pkg/server/admin.go b/pkg/server/admin.go index a792f1656afa..887f9f0035cc 100644 --- a/pkg/server/admin.go +++ b/pkg/server/admin.go @@ -774,7 +774,7 @@ func (s *adminServer) statsForSpan( if err := kv.Value.GetProto(&rng); err != nil { return nil, s.serverError(err) } - for _, repl := range rng.Replicas().All() { + for _, repl := range rng.Replicas().Descriptors() { nodeIDs[repl.NodeID] = struct{}{} } } @@ -1704,7 +1704,7 @@ func (s *adminServer) DecommissionStatus( if err := row.ValueProto(&rangeDesc); err != nil { return errors.Wrapf(err, "%s: unable to unmarshal range descriptor", row.Key) } - for _, r := range rangeDesc.Replicas().All() { + for _, r := range rangeDesc.Replicas().Descriptors() { if _, ok := replicaCounts[r.NodeID]; ok { replicaCounts[r.NodeID]++ } @@ -1891,7 +1891,7 @@ func (s *adminServer) DataDistribution( return err } - for _, replicaDesc := range rangeDesc.Replicas().All() { + for _, replicaDesc := range rangeDesc.Replicas().Descriptors() { tableInfo, ok := tableInfosByTableID[tableID] if !ok { // This is a database, skip. diff --git a/pkg/server/node.go b/pkg/server/node.go index 5b81b06aba85..f623dd125a76 100644 --- a/pkg/server/node.go +++ b/pkg/server/node.go @@ -1068,7 +1068,7 @@ func (n *Node) ResetQuorum( } // Update the range descriptor and update meta ranges for the descriptor, removing all replicas. - deadReplicas := append([]roachpb.ReplicaDescriptor(nil), desc.Replicas().All()...) + deadReplicas := append([]roachpb.ReplicaDescriptor(nil), desc.Replicas().Descriptors()...) for _, rd := range deadReplicas { desc.RemoveReplica(rd.NodeID, rd.StoreID) } diff --git a/pkg/server/status.go b/pkg/server/status.go index 5ce918a85926..366dc6ec9f1a 100644 --- a/pkg/server/status.go +++ b/pkg/server/status.go @@ -1416,7 +1416,7 @@ func (s *statusServer) RaftDebug( desc := node.Range.State.Desc // Check for whether replica should be GCed. 
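A worked example of the joint-quorum rule that CanMakeProgress (above) implements: a majority of the outgoing voters (VOTER_FULL, VOTER_OUTGOING, VOTER_DEMOTING) and a majority of the incoming voters (VOTER_FULL, VOTER_INCOMING) must both be live. The liveness map and replica IDs below are made up.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

func main() {
	typ := func(t roachpb.ReplicaType) *roachpb.ReplicaType { return &t }
	rs := roachpb.MakeReplicaSet([]roachpb.ReplicaDescriptor{
		{NodeID: 1, StoreID: 1, ReplicaID: 1, Type: typ(roachpb.VOTER_FULL)},     // live
		{NodeID: 2, StoreID: 2, ReplicaID: 2, Type: typ(roachpb.VOTER_FULL)},     // dead
		{NodeID: 3, StoreID: 3, ReplicaID: 3, Type: typ(roachpb.VOTER_OUTGOING)}, // live
		{NodeID: 4, StoreID: 4, ReplicaID: 4, Type: typ(roachpb.VOTER_INCOMING)}, // dead
	})
	live := map[roachpb.ReplicaID]bool{1: true, 3: true}

	// Old config {1,2,3} has 2 of 3 voters live, but new config {1,2,4} has
	// only 1 of 3 live, so under the rule above the joint configuration should
	// not be able to make progress.
	ok := rs.CanMakeProgress(func(rd roachpb.ReplicaDescriptor) bool {
		return live[rd.ReplicaID]
	})
	fmt.Println(ok) // expected: false
}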
containsNode := false - for _, replica := range desc.Replicas().All() { + for _, replica := range desc.Replicas().Descriptors() { if replica.NodeID == node.NodeID { containsNode = true } diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index b7640b98d0e6..b803e6cb2f4b 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -2554,9 +2554,9 @@ CREATE TABLE crdb_internal.ranges_no_leases ( return nil, err } - voterReplicas := append([]roachpb.ReplicaDescriptor(nil), desc.Replicas().Voters()...) + voterReplicas := append([]roachpb.ReplicaDescriptor(nil), desc.Replicas().VoterDescriptors()...) var learnerReplicaStoreIDs []int - for _, rd := range desc.Replicas().Learners() { + for _, rd := range desc.Replicas().LearnerDescriptors() { learnerReplicaStoreIDs = append(learnerReplicaStoreIDs, int(rd.StoreID)) } sort.Slice(voterReplicas, func(i, j int) bool { diff --git a/pkg/testutils/testcluster/testcluster.go b/pkg/testutils/testcluster/testcluster.go index 441198d93739..5370ecb6cf98 100644 --- a/pkg/testutils/testcluster/testcluster.go +++ b/pkg/testutils/testcluster/testcluster.go @@ -777,8 +777,8 @@ func (tc *TestCluster) FindRangeLease( } } else { hint = &roachpb.ReplicationTarget{ - NodeID: rangeDesc.Replicas().All()[0].NodeID, - StoreID: rangeDesc.Replicas().All()[0].StoreID} + NodeID: rangeDesc.Replicas().Descriptors()[0].NodeID, + StoreID: rangeDesc.Replicas().Descriptors()[0].StoreID} } // Find the server indicated by the hint and send a LeaseInfoRequest through @@ -853,7 +853,7 @@ func (tc *TestCluster) WaitForSplitAndInitialization(startKey roachpb.Key) error startKey, desc.StartKey) } // Once we've verified the split, make sure that replicas exist. - for _, rDesc := range desc.Replicas().All() { + for _, rDesc := range desc.Replicas().Descriptors() { store, err := tc.findMemberStore(rDesc.StoreID) if err != nil { return err
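A sketch of the listing pattern from the crdb_internal.ranges_no_leases hunk above: copy the voter slice before sorting, since the accessor may hand back shared storage, and collect learner store IDs separately. The helper name and the StoreID sort key are assumptions.

package main

import (
	"fmt"
	"sort"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// voterAndLearnerStores is a hypothetical helper following the hunk above.
func voterAndLearnerStores(desc *roachpb.RangeDescriptor) ([]roachpb.ReplicaDescriptor, []int) {
	// Copy before sorting: VoterDescriptors may return the set's underlying
	// slice, which must not be modified in place.
	voters := append([]roachpb.ReplicaDescriptor(nil), desc.Replicas().VoterDescriptors()...)
	sort.Slice(voters, func(i, j int) bool { return voters[i].StoreID < voters[j].StoreID })

	var learnerStoreIDs []int
	for _, rd := range desc.Replicas().LearnerDescriptors() {
		learnerStoreIDs = append(learnerStoreIDs, int(rd.StoreID))
	}
	return voters, learnerStoreIDs
}

func main() {
	typ := func(t roachpb.ReplicaType) *roachpb.ReplicaType { return &t }
	desc := roachpb.NewRangeDescriptor(
		1, roachpb.RKey("a"), roachpb.RKey("z"),
		roachpb.MakeReplicaSet([]roachpb.ReplicaDescriptor{
			{NodeID: 2, StoreID: 2},
			{NodeID: 1, StoreID: 1},
			{NodeID: 3, StoreID: 3, Type: typ(roachpb.LEARNER)},
		}))
	voters, learners := voterAndLearnerStores(desc)
	fmt.Println(voters, learners)
}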