kvserver, roachpb: rename ReplicaDescriptors to ReplicaSet
This commit renames `ReplicaDescriptors`, which provides some utility
methods over a `[]ReplicaDescriptor`, to `ReplicaSet`. The old name made
it hard/confusing to add new methods that return another `ReplicaSet`
(i.e. the wrapper type) instead of the underlying slice.

Release note: None
aayushshah15 committed Jan 7, 2021
1 parent 867340b commit d2e7818
Showing 51 changed files with 235 additions and 247 deletions.
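
For context, the diff below renames the accessors on the wrapper type along with the type itself (All → Descriptors, Voters → VoterDescriptors, Learners → LearnerDescriptors, Filter → FilterToDescriptors, MakeReplicaDescriptors → MakeReplicaSet). The Go sketch below lines up the new names against the old ones; it is inferred purely from the call sites in this commit, and the helper name and the StoreID predicate are illustrative, not part of the change:

package example

import "github.com/cockroachdb/cockroach/pkg/roachpb"

// replicaSetUsage is a hypothetical helper that exercises the renamed
// ReplicaSet accessors; the old names are noted in the trailing comments.
func replicaSetUsage(desc roachpb.RangeDescriptor, replicas []roachpb.ReplicaDescriptor) {
	desc.SetReplicas(roachpb.MakeReplicaSet(replicas)) // was roachpb.MakeReplicaDescriptors(replicas)
	_ = desc.Replicas().Descriptors()                  // was desc.Replicas().All()
	_ = desc.Replicas().VoterDescriptors()             // was desc.Replicas().Voters()
	_ = desc.Replicas().LearnerDescriptors()           // was desc.Replicas().Learners()
	_ = desc.Replicas().FilterToDescriptors(func(rd roachpb.ReplicaDescriptor) bool {
		return rd.StoreID == 1 // was desc.Replicas().Filter(...); the predicate here is arbitrary
	})
}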
4 changes: 2 additions & 2 deletions pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go
@@ -275,7 +275,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) {
require.Equal(t, []roachpb.ReplicaDescriptor{
{NodeID: 1, StoreID: 1, ReplicaID: 1},
{NodeID: 2, StoreID: 2, ReplicaID: 2},
}, entry.Desc().Replicas().All())
}, entry.Desc().Replicas().Descriptors())

// Relocate the follower. n2 will no longer have a replica.
n1.Exec(t, `ALTER TABLE test EXPERIMENTAL_RELOCATE VALUES (ARRAY[1,3], 1)`)
@@ -296,7 +296,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) {
require.Equal(t, []roachpb.ReplicaDescriptor{
{NodeID: 1, StoreID: 1, ReplicaID: 1},
{NodeID: 3, StoreID: 3, ReplicaID: 3},
}, entry.Desc().Replicas().All())
}, entry.Desc().Replicas().Descriptors())

// Make a note of the follower reads metric on n3. We'll check that it was
// incremented.
4 changes: 2 additions & 2 deletions pkg/cli/debug.go
@@ -955,7 +955,7 @@ func removeDeadReplicas(
err = kvserver.IterateRangeDescriptors(ctx, db, func(desc roachpb.RangeDescriptor) error {
hasSelf := false
numDeadPeers := 0
allReplicas := desc.Replicas().All()
allReplicas := desc.Replicas().Descriptors()
maxLivePeer := roachpb.StoreID(-1)
for _, rep := range allReplicas {
if rep.StoreID == storeIdent.StoreID {
@@ -998,7 +998,7 @@ func removeDeadReplicas(
StoreID: storeIdent.StoreID,
ReplicaID: desc.NextReplicaID,
}}
newDesc.SetReplicas(roachpb.MakeReplicaDescriptors(replicas))
newDesc.SetReplicas(roachpb.MakeReplicaSet(replicas))
newDesc.NextReplicaID++
fmt.Printf("Replica %s -> %s\n", &desc, &newDesc)
newDescs = append(newDescs, newDesc)
18 changes: 9 additions & 9 deletions pkg/kv/kvclient/kvcoord/dist_sender_test.go
@@ -586,7 +586,7 @@ func TestRetryOnNotLeaseHolderError(t *testing.T) {
defer log.Scope(t).Close(t)
ctx := context.Background()

recognizedLeaseHolder := testUserRangeDescriptor3Replicas.Replicas().Voters()[1]
recognizedLeaseHolder := testUserRangeDescriptor3Replicas.Replicas().VoterDescriptors()[1]
unrecognizedLeaseHolder := roachpb.ReplicaDescriptor{
NodeID: 99,
StoreID: 999,
@@ -642,7 +642,7 @@ func TestRetryOnNotLeaseHolderError(t *testing.T) {
clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
rpcContext := rpc.NewInsecureTestingContext(clock, stopper)
g := makeGossip(t, stopper, rpcContext)
for _, n := range testUserRangeDescriptor3Replicas.Replicas().Voters() {
for _, n := range testUserRangeDescriptor3Replicas.Replicas().VoterDescriptors() {
require.NoError(t, g.AddInfoProto(
gossip.MakeNodeIDKey(n.NodeID),
newNodeDesc(n.NodeID),
@@ -823,7 +823,7 @@ func TestDistSenderMovesOnFromReplicaWithStaleLease(t *testing.T) {
clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
rpcContext := rpc.NewInsecureTestingContext(clock, stopper)
g := makeGossip(t, stopper, rpcContext)
for _, n := range testUserRangeDescriptor3Replicas.Replicas().Voters() {
for _, n := range testUserRangeDescriptor3Replicas.Replicas().VoterDescriptors() {
require.NoError(t, g.AddInfoProto(
gossip.MakeNodeIDKey(n.NodeID),
newNodeDesc(n.NodeID),
@@ -4329,17 +4329,17 @@ func TestDistSenderDescEvictionAfterLeaseUpdate(t *testing.T) {
br := &roachpb.BatchResponse{}
switch call {
case 0:
expRepl := desc1.Replicas().All()[0]
expRepl := desc1.Replicas().Descriptors()[0]
require.Equal(t, expRepl, ba.Replica)
br.Error = roachpb.NewError(&roachpb.NotLeaseHolderError{
Lease: &roachpb.Lease{Replica: desc1.Replicas().All()[1]},
Lease: &roachpb.Lease{Replica: desc1.Replicas().Descriptors()[1]},
})
case 1:
expRep := desc1.Replicas().All()[1]
expRep := desc1.Replicas().Descriptors()[1]
require.Equal(t, ba.Replica, expRep)
br.Error = roachpb.NewError(roachpb.NewRangeNotFoundError(ba.RangeID, ba.Replica.StoreID))
case 2:
expRep := desc2.Replicas().All()[0]
expRep := desc2.Replicas().Descriptors()[0]
require.Equal(t, ba.Replica, expRep)
br = ba.CreateReply()
default:
@@ -4420,7 +4420,7 @@ func TestDistSenderRPCMetrics(t *testing.T) {
br := &roachpb.BatchResponse{}
if call == 0 {
br.Error = roachpb.NewError(&roachpb.NotLeaseHolderError{
Lease: &roachpb.Lease{Replica: desc.Replicas().All()[1]},
Lease: &roachpb.Lease{Replica: desc.Replicas().Descriptors()[1]},
})
} else {
br.Error = roachpb.NewError(&roachpb.ConditionFailedError{})
@@ -4449,7 +4449,7 @@ func TestDistSenderRPCMetrics(t *testing.T) {
ds.rangeCache.Insert(ctx, roachpb.RangeInfo{
Desc: desc,
Lease: roachpb.Lease{
Replica: desc.Replicas().All()[0],
Replica: desc.Replicas().Descriptors()[0],
},
})
var ba roachpb.BatchRequest
6 changes: 3 additions & 3 deletions pkg/kv/kvclient/kvcoord/replica_slice.go
@@ -67,11 +67,11 @@ func NewReplicaSlice(
}

// Learner replicas won't serve reads/writes, so we'll send only to the
// `Voters` replicas. This is just an optimization to save a network hop,
// `VoterDescriptors` replicas. This is just an optimization to save a network hop,
// everything would still work if we had `All` here.
voters := desc.Replicas().Voters()
voters := desc.Replicas().VoterDescriptors()
// If we know a leaseholder, though, let's make sure we include it.
if leaseholder != nil && len(voters) < len(desc.Replicas().All()) {
if leaseholder != nil && len(voters) < len(desc.Replicas().Descriptors()) {
found := false
for _, v := range voters {
if v == *leaseholder {
2 changes: 1 addition & 1 deletion pkg/kv/kvnemesis/applier.go
@@ -259,7 +259,7 @@ func newGetReplicasFn(dbs ...*kv.DB) GetReplicasFn {
ctx := context.Background()
return func(key roachpb.Key) []roachpb.ReplicationTarget {
desc := getRangeDesc(ctx, key, dbs...)
replicas := desc.Replicas().All()
replicas := desc.Replicas().Descriptors()
targets := make([]roachpb.ReplicationTarget, len(replicas))
for i, replica := range replicas {
targets[i] = roachpb.ReplicationTarget{
4 changes: 2 additions & 2 deletions pkg/kv/kvserver/allocator.go
@@ -346,7 +346,7 @@ func (a *Allocator) ComputeAction(
// On the other hand if we get the race where a leaseholder starts adding a
// replica in the replicate queue and during this loses its lease, it should
// probably not retry.
if learners := desc.Replicas().Learners(); len(learners) > 0 {
if learners := desc.Replicas().LearnerDescriptors(); len(learners) > 0 {
// TODO(dan): Since this goes before anything else, the priority here should
// be influenced by whatever operations would happen right after the learner
// is removed. In the meantime, we don't want to block something important
@@ -356,7 +356,7 @@
return AllocatorRemoveLearner, removeLearnerReplicaPriority
}
// computeAction expects to operate only on voters.
return a.computeAction(ctx, zone, desc.Replicas().Voters())
return a.computeAction(ctx, zone, desc.Replicas().VoterDescriptors())
}

func (a *Allocator) computeAction(
2 changes: 1 addition & 1 deletion pkg/kv/kvserver/batcheval/cmd_lease_test.go
@@ -119,7 +119,7 @@ func TestLeaseCommandLearnerReplica(t *testing.T) {
{NodeID: 2, StoreID: learnerStoreID, Type: roachpb.ReplicaTypeLearner(), ReplicaID: 2},
}
desc := roachpb.RangeDescriptor{}
desc.SetReplicas(roachpb.MakeReplicaDescriptors(replicas))
desc.SetReplicas(roachpb.MakeReplicaSet(replicas))
cArgs := CommandArgs{
EvalCtx: (&MockEvalCtx{
StoreID: voterStoreID,
6 changes: 3 additions & 3 deletions pkg/kv/kvserver/client_merge_test.go
@@ -1992,11 +1992,11 @@ func TestStoreRangeMergeAddReplicaRace(t *testing.T) {
`|cannot merge range with non-voter replicas`
if mergeErr == nil && kvserver.IsRetriableReplicationChangeError(addErr) {
// Merge won the race, no add happened.
require.Len(t, afterDesc.Replicas().Voters(), 1)
require.Len(t, afterDesc.Replicas().VoterDescriptors(), 1)
require.Equal(t, origDesc.EndKey, afterDesc.EndKey)
} else if addErr == nil && testutils.IsError(mergeErr, acceptableMergeErr) {
// Add won the race, no merge happened.
require.Len(t, afterDesc.Replicas().Voters(), 2)
require.Len(t, afterDesc.Replicas().VoterDescriptors(), 2)
require.Equal(t, beforeDesc.EndKey, afterDesc.EndKey)
} else {
t.Fatalf(`expected exactly one of merge or add to succeed got: [merge] %v [add] %v`,
@@ -2032,7 +2032,7 @@ func TestStoreRangeMergeResplitAddReplicaRace(t *testing.T) {
assert.Equal(t, origDesc.RangeID, resplitDesc.RangeID)
assert.Equal(t, origDesc.StartKey, resplitDesc.StartKey)
assert.Equal(t, origDesc.EndKey, resplitDesc.EndKey)
assert.Equal(t, origDesc.Replicas().All(), resplitDesc.Replicas().All())
assert.Equal(t, origDesc.Replicas().Descriptors(), resplitDesc.Replicas().Descriptors())
assert.NotEqual(t, origDesc.Generation, resplitDesc.Generation)

_, err := tc.Server(0).DB().AdminChangeReplicas(
4 changes: 2 additions & 2 deletions pkg/kv/kvserver/client_migration_test.go
@@ -151,8 +151,8 @@ func TestMigrateWithInflightSnapshot(t *testing.T) {
// added.
<-blockUntilSnapshotCh
desc := tc.LookupRangeOrFatal(t, k)
require.Len(t, desc.Replicas().Voters(), 1)
require.Len(t, desc.Replicas().Learners(), 1)
require.Len(t, desc.Replicas().VoterDescriptors(), 1)
require.Len(t, desc.Replicas().LearnerDescriptors(), 1)

// Enqueue the replica in the raftsnapshot queue. We use SucceedsSoon
// because it may take a bit for raft to figure out that we need to be
6 changes: 3 additions & 3 deletions pkg/kv/kvserver/client_raft_test.go
@@ -3173,19 +3173,19 @@ func TestDecommission(t *testing.T) {
requireNoReplicas := func(storeID roachpb.StoreID, repFactor int) {
testutils.SucceedsSoon(t, func() error {
desc := tc.LookupRangeOrFatal(t, k)
for _, rDesc := range desc.Replicas().Voters() {
for _, rDesc := range desc.Replicas().VoterDescriptors() {
store, err := tc.Servers[int(rDesc.NodeID-1)].Stores().GetStore(rDesc.StoreID)
require.NoError(t, err)
if err := store.ForceReplicationScanAndProcess(); err != nil {
return err
}
}
if sl := desc.Replicas().Filter(func(rDesc roachpb.ReplicaDescriptor) bool {
if sl := desc.Replicas().FilterToDescriptors(func(rDesc roachpb.ReplicaDescriptor) bool {
return rDesc.StoreID == storeID
}); len(sl) > 0 {
return errors.Errorf("still a replica on s%d: %s", storeID, &desc)
}
if len(desc.Replicas().Voters()) != repFactor {
if len(desc.Replicas().VoterDescriptors()) != repFactor {
return errors.Errorf("expected %d replicas: %s", repFactor, &desc)
}
return nil
2 changes: 1 addition & 1 deletion pkg/kv/kvserver/client_relocate_range_test.go
@@ -56,7 +56,7 @@ func requireDescMembers(
sort.Slice(targets, func(i, j int) bool { return targets[i].StoreID < targets[j].StoreID })

have := make([]roachpb.ReplicationTarget, 0, len(targets))
for _, rDesc := range desc.Replicas().All() {
for _, rDesc := range desc.Replicas().Descriptors() {
have = append(have, roachpb.ReplicationTarget{
NodeID: rDesc.NodeID,
StoreID: rDesc.StoreID,
4 changes: 2 additions & 2 deletions pkg/kv/kvserver/client_replica_backpressure_test.go
@@ -152,7 +152,7 @@ func TestBackpressureNotAppliedWhenReducingRangeSize(t *testing.T) {
// replicas to move according to them.
tc.ToggleReplicateQueues(false)
defer tc.ToggleReplicateQueues(true)
voters := desc.Replicas().Voters()
voters := desc.Replicas().VoterDescriptors()
if len(voters) == 1 && voters[0].NodeID == tc.Server(1).NodeID() {
return nil
}
@@ -270,7 +270,7 @@ func TestBackpressureNotAppliedWhenReducingRangeSize(t *testing.T) {

s, repl := getFirstStoreReplica(t, tc.Server(1), tablePrefix)
s.SetReplicateQueueActive(false)
require.Len(t, repl.Desc().Replicas().All(), 1)
require.Len(t, repl.Desc().Replicas().Descriptors(), 1)
// We really need to make sure that the split queue has hit this range,
// otherwise we'll fail to backpressure.
go func() { _ = s.ForceSplitScanAndProcess() }()
4 changes: 2 additions & 2 deletions pkg/kv/kvserver/client_replica_test.go
@@ -1950,7 +1950,7 @@ func TestSystemZoneConfigs(t *testing.T) {
replicas := make(map[roachpb.RangeID]roachpb.RangeDescriptor)
for _, s := range tc.Servers {
if err := kvserver.IterateRangeDescriptors(ctx, s.Engines()[0], func(desc roachpb.RangeDescriptor) error {
if len(desc.Replicas().Learners()) > 0 {
if len(desc.Replicas().LearnerDescriptors()) > 0 {
return fmt.Errorf("descriptor contains learners: %v", desc)
}
if existing, ok := replicas[desc.RangeID]; ok && !existing.Equal(&desc) {
@@ -1964,7 +1964,7 @@
}
var totalReplicas int
for _, desc := range replicas {
totalReplicas += len(desc.Replicas().Voters())
totalReplicas += len(desc.Replicas().VoterDescriptors())
}
if totalReplicas != expectedReplicas {
return fmt.Errorf("got %d voters, want %d; details: %+v", totalReplicas, expectedReplicas, replicas)
2 changes: 1 addition & 1 deletion pkg/kv/kvserver/client_test.go
@@ -239,7 +239,7 @@ func createTestStoreWithOpts(
var ba roachpb.BatchRequest
get := roachpb.GetRequest{}
get.Key = keys.LocalMax
ba.Header.Replica = repl.Desc().Replicas().Voters()[0]
ba.Header.Replica = repl.Desc().Replicas().VoterDescriptors()[0]
ba.Header.RangeID = repl.RangeID
ba.Add(&get)
_, pErr := store.Send(ctx, ba)
2 changes: 1 addition & 1 deletion pkg/kv/kvserver/consistency_queue.go
@@ -137,7 +137,7 @@ func consistencyQueueShouldQueueImpl(
}
}
// Check if all replicas are live.
for _, rep := range data.desc.Replicas().All() {
for _, rep := range data.desc.Replicas().Descriptors() {
if live, err := data.isNodeLive(rep.NodeID); err != nil {
log.VErrEventf(ctx, 3, "node %d liveness failed: %s", rep.NodeID, err)
return false, 0
2 changes: 1 addition & 1 deletion pkg/kv/kvserver/merge_queue.go
@@ -287,7 +287,7 @@ func (mq *mergeQueue) process(
return false, err
}
}
lhsReplicas, rhsReplicas := lhsDesc.Replicas().All(), rhsDesc.Replicas().All()
lhsReplicas, rhsReplicas := lhsDesc.Replicas().Descriptors(), rhsDesc.Replicas().Descriptors()

// Defensive sanity check that everything is now a voter.
for i := range lhsReplicas {
2 changes: 1 addition & 1 deletion pkg/kv/kvserver/raft_log_queue.go
@@ -202,7 +202,7 @@ func newTruncateDecision(ctx context.Context, r *Replica) (truncateDecision, err
log.Eventf(ctx, "raft status before lastUpdateTimes check: %+v", raftStatus.Progress)
log.Eventf(ctx, "lastUpdateTimes: %+v", r.mu.lastUpdateTimes)
updateRaftProgressFromActivity(
ctx, raftStatus.Progress, r.descRLocked().Replicas().All(),
ctx, raftStatus.Progress, r.descRLocked().Replicas().Descriptors(),
func(replicaID roachpb.ReplicaID) bool {
return r.mu.lastUpdateTimes.isFollowerActiveSince(
ctx, replicaID, now, r.store.cfg.RangeLeaseActiveDuration())
4 changes: 2 additions & 2 deletions pkg/kv/kvserver/replica.go
@@ -865,7 +865,7 @@ func maxReplicaIDOfAny(desc *roachpb.RangeDescriptor) roachpb.ReplicaID {
return 0
}
var maxID roachpb.ReplicaID
for _, repl := range desc.Replicas().All() {
for _, repl := range desc.Replicas().Descriptors() {
if repl.ReplicaID > maxID {
maxID = repl.ReplicaID
}
@@ -1066,7 +1066,7 @@ func (r *Replica) State() kvserverpb.RangeInfo {
if desc := ri.ReplicaState.Desc; desc != nil {
// Learner replicas don't serve follower reads, but they still receive
// closed timestamp updates, so include them here.
allReplicas := desc.Replicas().All()
allReplicas := desc.Replicas().Descriptors()
for i := range allReplicas {
replDesc := &allReplicas[i]
r.store.cfg.ClosedTimestamp.Storage.VisitDescending(replDesc.NodeID, func(e ctpb.Entry) (done bool) {