diff --git a/pkg/kv/kvclient/kvcoord/send_test.go b/pkg/kv/kvclient/kvcoord/send_test.go index 3a96cb12e136..81b6a19e7c99 100644 --- a/pkg/kv/kvclient/kvcoord/send_test.go +++ b/pkg/kv/kvclient/kvcoord/send_test.go @@ -104,7 +104,7 @@ func (n Node) UpdateSpanConfigs( func (n Node) SpanConfigConformance( context.Context, *roachpb.SpanConfigConformanceRequest, ) (*roachpb.SpanConfigConformanceResponse, error) { - panic("implement me") + panic("unimplemented") } func (n Node) TenantSettings( diff --git a/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go b/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go index d37feee0152f..87c6dd09a6bd 100644 --- a/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go +++ b/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go @@ -3971,7 +3971,7 @@ func TestRemoveCandidatesNumReplicasConstraints(t *testing.T) { StoreID: storeID, } } - analyzed := constraint.AnalyzeConstraints(a.StorePool, existingRepls, 0, tc.constraints) + analyzed := constraint.AnalyzeConstraints(a.StorePool, existingRepls, 0 /* numReplicas */, tc.constraints) // Check behavior in a span config where `voter_constraints` are empty. checkFn := voterConstraintsCheckerForRemoval(analyzed, constraint.EmptyAnalyzedConstraints) diff --git a/pkg/kv/kvserver/replica_metrics.go b/pkg/kv/kvserver/replica_metrics.go index efbe8d941993..5e1d332c8f18 100644 --- a/pkg/kv/kvserver/replica_metrics.go +++ b/pkg/kv/kvserver/replica_metrics.go @@ -223,7 +223,7 @@ func calcRangeCounter( // needed{Voters,NonVoters} - we don't care about the // under/over-replication determinations from the report because // it's too magic. We'll do our own determination below. - 0, 0) + 0, -1) unavailable = !status.Available liveVoters := calcLiveVoterReplicas(desc, livenessMap) liveNonVoters := calcLiveNonVoterReplicas(desc, livenessMap) diff --git a/pkg/kv/kvserver/reports/replication_stats_report.go b/pkg/kv/kvserver/reports/replication_stats_report.go index 863f91d097eb..ee5f08d4218e 100644 --- a/pkg/kv/kvserver/reports/replication_stats_report.go +++ b/pkg/kv/kvserver/reports/replication_stats_report.go @@ -404,7 +404,7 @@ func (v *replicationStatsVisitor) countRange( // NB: this reporting code was written before ReplicationStatus reported // on non-voting replicas. This code will also soon be removed in favor // of something that works with multi-tenancy (#89987). - }, replicationFactor, 0) + }, replicationFactor, -1 /* neededNonVoters */) // Note that a range can be under-replicated and over-replicated at the same // time if it has many replicas, but sufficiently many of them are on dead // nodes. diff --git a/pkg/roachpb/metadata_replicas.go b/pkg/roachpb/metadata_replicas.go index 4401237f15d4..2b874d082cdd 100644 --- a/pkg/roachpb/metadata_replicas.go +++ b/pkg/roachpb/metadata_replicas.go @@ -378,7 +378,7 @@ func (d ReplicaSet) HasReplicaOnNode(nodeID NodeID) bool { // the replication layer. This is more complicated than just counting the number // of replicas due to the existence of joint quorums. func (d ReplicaSet) CanMakeProgress(liveFunc func(descriptor ReplicaDescriptor) bool) bool { - return d.ReplicationStatus(liveFunc, 0 /* neededVoters */, 0 /* neededNonVoters*/).Available + return d.ReplicationStatus(liveFunc, 0 /* neededVoters */, -1 /* neededNonVoters*/).Available } // RangeStatusReport contains info about a range's replication status. 
Returned @@ -412,7 +412,8 @@ type RangeStatusReport struct { // neededVoters is the replica's desired replication for purposes of determining // over/under-replication of voters. If the caller is only interested in // availability of voting replicas, 0 can be passed in. neededNonVoters is the -// counterpart for non-voting replicas. +// counterpart for non-voting replicas but with -1 as the sentinel value (unlike +// voters, it's possible to expect 0 non-voters). func (d ReplicaSet) ReplicationStatus( liveFunc func(descriptor ReplicaDescriptor) bool, neededVoters int, neededNonVoters int, ) RangeStatusReport { @@ -454,7 +455,7 @@ func (d ReplicaSet) ReplicationStatus( overReplicatedNewGroup := len(votersNewGroup) > neededVoters res.UnderReplicated = underReplicatedOldGroup || underReplicatedNewGroup res.OverReplicated = overReplicatedOldGroup || overReplicatedNewGroup - if neededNonVoters == 0 { + if neededNonVoters == -1 { return res } diff --git a/pkg/roachpb/span_config.proto b/pkg/roachpb/span_config.proto index dc298ac6bae2..5c6a9bb5e604 100644 --- a/pkg/roachpb/span_config.proto +++ b/pkg/roachpb/span_config.proto @@ -278,15 +278,23 @@ message SpanConfigEntry { SpanConfig config = 2 [(gogoproto.nullable) = false]; }; -// SpanConfigConformanceReport lists out ranges that (i) don't conform to span -// configs that apply over them, and (ii) are unavailable. +// SpanConfigConformanceReport reports ranges that (i) don't conform to span +// configs that apply over them, and (ii) are unavailable. Also included in this +// report are the IDs of unavailable nodes (possibly contributing to +// under-replication or range-unavailability). message SpanConfigConformanceReport { - repeated RangeDescriptor under_replicated = 1 [(gogoproto.nullable) = false]; - repeated RangeDescriptor over_replicated = 2 [(gogoproto.nullable) = false]; - repeated RangeDescriptor violating_constraints = 3 [(gogoproto.nullable) = false]; - repeated RangeDescriptor unavailable = 4 [(gogoproto.nullable) = false]; + repeated ConformanceReportedRange under_replicated = 1 [(gogoproto.nullable) = false]; + repeated ConformanceReportedRange over_replicated = 2 [(gogoproto.nullable) = false]; + repeated ConformanceReportedRange violating_constraints = 3 [(gogoproto.nullable) = false]; + repeated ConformanceReportedRange unavailable = 4 [(gogoproto.nullable) = false]; + repeated int32 unavailable_node_ids = 5 [(gogoproto.customname) = "UnavailableNodeIDs"]; }; +message ConformanceReportedRange { + RangeDescriptor range_descriptor = 1 [(gogoproto.nullable) = false]; + SpanConfig config = 2 [(gogoproto.nullable) = false]; +} + // GetSpanConfigsRequest is used to fetch the span configurations and system // span configurations. 
message GetSpanConfigsRequest { diff --git a/pkg/rpc/auth_tenant.go b/pkg/rpc/auth_tenant.go index e8ec04ad2a97..190bebb14997 100644 --- a/pkg/rpc/auth_tenant.go +++ b/pkg/rpc/auth_tenant.go @@ -115,6 +115,9 @@ func (a tenantAuthorizer) authorize( case "/cockroach.roachpb.Internal/GetSpanConfigs": return a.authGetSpanConfigs(tenID, req.(*roachpb.GetSpanConfigsRequest)) + case "/cockroach.roachpb.Internal/SpanConfigConformance": + return a.authSpanConfigConformance(tenID, req.(*roachpb.SpanConfigConformanceRequest)) + case "/cockroach.roachpb.Internal/GetAllSystemSpanConfigsThatApply": return a.authGetAllSystemSpanConfigsThatApply(tenID, req.(*roachpb.GetAllSystemSpanConfigsThatApplyRequest)) @@ -335,6 +338,19 @@ func (a tenantAuthorizer) authUpdateSpanConfigs( return nil } +// authSpanConfigConformance authorizes the provided tenant to invoke the +// SpanConfigConformance RPC with the provided args. +func (a tenantAuthorizer) authSpanConfigConformance( + tenID roachpb.TenantID, args *roachpb.SpanConfigConformanceRequest, +) error { + for _, sp := range args.Spans { + if err := validateSpan(tenID, sp); err != nil { + return err + } + } + return nil +} + // validateSpanConfigTarget validates that the tenant is authorized to interact // with the supplied span config target. In particular, span targets must be // wholly contained within the tenant keyspace and system span config targets @@ -367,21 +383,9 @@ func validateSpanConfigTarget( return nil } - validateSpan := func(sp roachpb.Span) error { - tenSpan := tenantPrefix(tenID) - rSpan, err := keys.SpanAddr(sp) - if err != nil { - return authError(err.Error()) - } - if !tenSpan.ContainsKeyRange(rSpan.Key, rSpan.EndKey) { - return authErrorf("requested key span %s not fully contained in tenant keyspace %s", rSpan, tenSpan) - } - return nil - } - switch spanConfigTarget.Union.(type) { case *roachpb.SpanConfigTarget_Span: - return validateSpan(*spanConfigTarget.GetSpan()) + return validateSpan(tenID, *spanConfigTarget.GetSpan()) case *roachpb.SpanConfigTarget_SystemSpanConfigTarget: return validateSystemTarget(*spanConfigTarget.GetSystemSpanConfigTarget()) default: @@ -389,6 +393,18 @@ func validateSpanConfigTarget( } } +func validateSpan(tenID roachpb.TenantID, sp roachpb.Span) error { + tenSpan := tenantPrefix(tenID) + rSpan, err := keys.SpanAddr(sp) + if err != nil { + return authError(err.Error()) + } + if !tenSpan.ContainsKeyRange(rSpan.Key, rSpan.EndKey) { + return authErrorf("requested key span %s not fully contained in tenant keyspace %s", rSpan, tenSpan) + } + return nil +} + func contextWithTenant(ctx context.Context, tenID roachpb.TenantID) context.Context { ctx = roachpb.NewContextForTenant(ctx, tenID) ctx = logtags.AddTag(ctx, "tenant", tenID.String()) diff --git a/pkg/spanconfig/spanconfigreporter/BUILD.bazel b/pkg/spanconfig/spanconfigreporter/BUILD.bazel index 220924de2eb2..11e855558786 100644 --- a/pkg/spanconfig/spanconfigreporter/BUILD.bazel +++ b/pkg/spanconfig/spanconfigreporter/BUILD.bazel @@ -31,6 +31,7 @@ go_test( data = glob(["testdata/**"]), deps = [ ":spanconfigreporter", + "//pkg/keys", "//pkg/kv/kvserver/constraint", "//pkg/roachpb", "//pkg/security/securityassets", diff --git a/pkg/spanconfig/spanconfigreporter/datadriven_test.go b/pkg/spanconfig/spanconfigreporter/datadriven_test.go index 15fe3f4ac288..351097de24e8 100644 --- a/pkg/spanconfig/spanconfigreporter/datadriven_test.go +++ b/pkg/spanconfig/spanconfigreporter/datadriven_test.go @@ -17,6 +17,7 @@ import ( "strings" "testing" + 
"github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/constraint" "github.com/cockroachdb/cockroach/pkg/roachpb" clustersettings "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -156,6 +157,9 @@ func TestDataDriven(t *testing.T) { } spans = append(spans, spanconfigtestutils.ParseSpan(t, line)) } + if len(spans) == 0 { + spans = append(spans, keys.EverythingSpan) + } report, err := reporter.SpanConfigConformance(ctx, spans) require.NoError(t, err) printRangeDesc := func(r roachpb.RangeDescriptor) string { @@ -176,13 +180,14 @@ func TestDataDriven(t *testing.T) { buf.WriteString("]") return buf.String() } - printList := func(tag string, descs []roachpb.RangeDescriptor) string { + printList := func(tag string, ranges []roachpb.ConformanceReportedRange) string { var buf strings.Builder - for i, desc := range descs { + for i, r := range ranges { if i == 0 { buf.WriteString(fmt.Sprintf("%s:\n", tag)) } - buf.WriteString(fmt.Sprintf(" %s\n", printRangeDesc(desc))) + buf.WriteString(fmt.Sprintf(" %s applying %s\n", printRangeDesc(r.RangeDescriptor), + spanconfigtestutils.PrintSpanConfigDiffedAgainstDefaults(r.Config))) } return buf.String() } @@ -251,7 +256,7 @@ func (s *mockCluster) GetStoreDescriptor(storeID roachpb.StoreID) (roachpb.Store // Iterate implements rangedesciter.Iterator. func (s *mockCluster) Iterate( - _ context.Context, _ int, _ func(), fn func(...roachpb.RangeDescriptor) error, + _ context.Context, _ int, _ func(), _ roachpb.Span, fn func(...roachpb.RangeDescriptor) error, ) error { var descs []roachpb.RangeDescriptor for _, d := range s.ranges { diff --git a/pkg/spanconfig/spanconfigreporter/reporter.go b/pkg/spanconfig/spanconfigreporter/reporter.go index ad1d7f707372..b5a2af59ec23 100644 --- a/pkg/spanconfig/spanconfigreporter/reporter.go +++ b/pkg/spanconfig/spanconfigreporter/reporter.go @@ -86,96 +86,118 @@ func New( // TODO(irfansharif): Support the equivalent of "critical localities", perhaps // through a different API than the one below since it's not quite // span-oriented. +// +// TODO(irfansharif): Once wired up the SQL code or exposed through an endpoint, +// write an end-to-end test using actual SQL and zone configs. Set configs on a +// table, disable replication, see conformance report. Enable repl, change +// configs, repeat. Do this for tenants as well. // SpanConfigConformance implements the spanconfig.Reporter interface. func (r *Reporter) SpanConfigConformance( ctx context.Context, spans []roachpb.Span, ) (roachpb.SpanConfigConformanceReport, error) { - // XXX: Actually use the spans parameter. Update the rangedesc.Iterator - // interfaces to take in a keyspan and bound meta{1,2} search just to - // segments that would possibly overlap with that keyspan. Until this - // keyspan scoping is done, we can't let this be used in tenants. - _ = spans - - // XXX: Write an end-to-end test using actual SQL and zone configs. Set configs - // on a table, disable replication, see conformance. Enable repl, change - // configs, etc. Use tenants as well for this mode. Do this for tenants as well. - // Do this after some form of this API is exposed through SQL/an endpoint. - - // XXX: Can we improve the SpanConfigConformanceReport proto type? Perhaps - // include some {meta,}data about the span config being violated as well? Or - // include the span config directly and provide helper libraries to compute - // human-readable "why is this in violation" text. - // - Only include range ID + replica descriptors + keys? 
- // - Type to represent exactly which constraint exactly is being violated? - // - Segment over/under replicated by what replica type (voter/non-voter) - // exactly is over/under replicated? - report := roachpb.SpanConfigConformanceReport{} - if err := r.dep.Iterate(ctx, int(rangeDescPageSize.Get(&r.settings.SV)), func() { - report = roachpb.SpanConfigConformanceReport{} // init - }, func(descriptors ...roachpb.RangeDescriptor) error { - for _, desc := range descriptors { - conf, err := r.dep.StoreReader.GetSpanConfigForKey(ctx, desc.StartKey) - if err != nil { - return err - } - - status := desc.Replicas().ReplicationStatus( - func(rDesc roachpb.ReplicaDescriptor) bool { - isLive, err := r.dep.Liveness.IsLive(rDesc.NodeID) + unavailableNodes := make(map[roachpb.NodeID]struct{}) + + for _, span := range spans { + if err := r.dep.Iterate(ctx, int(rangeDescPageSize.Get(&r.settings.SV)), + func() { report = roachpb.SpanConfigConformanceReport{} /* init */ }, + span, + func(descriptors ...roachpb.RangeDescriptor) error { + for _, desc := range descriptors { + conf, err := r.dep.StoreReader.GetSpanConfigForKey(ctx, desc.StartKey) if err != nil { - // As of 2022-10, this error only appears if we're - // asking for the liveness of a node ID that doesn't - // exist, which should never happen. Shout loudly - // and declare things as non-live. - log.Errorf(ctx, "programming error: unexpected err: %v", err) - return false + return err } - return isLive - }, int(conf.GetNumVoters()), int(conf.GetNumNonVoters())) - if !status.Available { - report.Unavailable = append(report.Unavailable, desc) - } - if status.UnderReplicated || status.UnderReplicatedNonVoters { - report.UnderReplicated = append(report.UnderReplicated, desc) - } - if status.OverReplicated || status.OverReplicatedNonVoters { - report.OverReplicated = append(report.OverReplicated, desc) - } - // Compute constraint violations for the overall (affecting voters - // and non-voters alike) and voter constraints. - overall := constraint.AnalyzeConstraints( - r.dep.StoreResolver, - desc.Replicas().Descriptors(), - conf.NumReplicas, conf.Constraints) - for i, c := range overall.Constraints { - if c.NumReplicas == 0 { - c.NumReplicas = conf.NumReplicas - } - if len(overall.SatisfiedBy[i]) < int(c.NumReplicas) { - report.ViolatingConstraints = append(report.ViolatingConstraints, desc) - break - } - } - voters := constraint.AnalyzeConstraints( - r.dep.StoreResolver, - desc.Replicas().Voters().Descriptors(), - conf.GetNumVoters(), conf.VoterConstraints) - for i, c := range voters.Constraints { - if c.NumReplicas == 0 { - c.NumReplicas = conf.GetNumVoters() - } - if len(voters.SatisfiedBy[i]) < int(c.NumReplicas) { - report.ViolatingConstraints = append(report.ViolatingConstraints, desc) - break + status := desc.Replicas().ReplicationStatus( + func(rDesc roachpb.ReplicaDescriptor) bool { + isLive, err := r.dep.Liveness.IsLive(rDesc.NodeID) + if err != nil { + // As of 2022-10, this error only appears if we're + // asking for the liveness of a node ID that doesn't + // exist, which should never happen. Shout loudly + // and declare things as non-live. 
+ log.Errorf(ctx, "programming error: unexpected err: %v", err) + return false + } + if !isLive { + unavailableNodes[rDesc.NodeID] = struct{}{} + } + return isLive + }, int(conf.GetNumVoters()), int(conf.GetNumNonVoters())) + if !status.Available { + report.Unavailable = append(report.Unavailable, + roachpb.ConformanceReportedRange{ + RangeDescriptor: desc, + Config: conf, + }) + } + if status.UnderReplicated || status.UnderReplicatedNonVoters { + report.UnderReplicated = append(report.UnderReplicated, + roachpb.ConformanceReportedRange{ + RangeDescriptor: desc, + Config: conf, + }) + } + if status.OverReplicated || status.OverReplicatedNonVoters { + report.OverReplicated = append(report.OverReplicated, + roachpb.ConformanceReportedRange{ + RangeDescriptor: desc, + Config: conf, + }) + } + + // Compute constraint violations for the overall (affecting voters + // and non-voters alike) and voter constraints. + overall := constraint.AnalyzeConstraints( + r.dep.StoreResolver, + desc.Replicas().Descriptors(), + conf.NumReplicas, conf.Constraints) + for i, c := range overall.Constraints { + if c.NumReplicas == 0 { + // NB: This is a weird artifact of + // constraint.NumReplicas, which if set to zero is + // used to imply that the constraint will applies to + // all replicas. Setting it explicitly makes the + // code below less fragile. + c.NumReplicas = conf.NumReplicas + } + if len(overall.SatisfiedBy[i]) < int(c.NumReplicas) { + report.ViolatingConstraints = append(report.ViolatingConstraints, + roachpb.ConformanceReportedRange{ + RangeDescriptor: desc, + Config: conf, + }) + break + } + } + voters := constraint.AnalyzeConstraints( + r.dep.StoreResolver, + desc.Replicas().Voters().Descriptors(), + conf.GetNumVoters(), conf.VoterConstraints) + for i, c := range voters.Constraints { + if c.NumReplicas == 0 { + c.NumReplicas = conf.GetNumVoters() + } + if len(voters.SatisfiedBy[i]) < int(c.NumReplicas) { + report.ViolatingConstraints = append(report.ViolatingConstraints, + roachpb.ConformanceReportedRange{ + RangeDescriptor: desc, + Config: conf, + }) + break + } + } } - } + return nil + }); err != nil { + return roachpb.SpanConfigConformanceReport{}, err } - return nil - }); err != nil { - return roachpb.SpanConfigConformanceReport{}, err + } + + for nid := range unavailableNodes { + report.UnavailableNodeIDs = append(report.UnavailableNodeIDs, int32(nid)) } return report, nil } diff --git a/pkg/spanconfig/spanconfigreporter/testdata/basic b/pkg/spanconfig/spanconfigreporter/testdata/basic index 144b03dbe909..9a6e73024a32 100644 --- a/pkg/spanconfig/spanconfigreporter/testdata/basic +++ b/pkg/spanconfig/spanconfigreporter/testdata/basic @@ -45,7 +45,7 @@ n6: dead report ---- under replicated: - r1:{a-b} [(n2,s2):2, (n4,s4):4, (n6,s6):6] + r1:{a-b} [(n2,s2):2, (n4,s4):4, (n6,s6):6] applying range default liveness n4: dead @@ -54,9 +54,9 @@ n4: dead report ---- unavailable: - r1:{a-b} [(n2,s2):2, (n4,s4):4, (n6,s6):6] + r1:{a-b} [(n2,s2):2, (n4,s4):4, (n6,s6):6] applying range default under replicated: - r1:{a-b} [(n2,s2):2, (n4,s4):4, (n6,s6):6] + r1:{a-b} [(n2,s2):2, (n4,s4):4, (n6,s6):6] applying range default liveness n4: live @@ -76,7 +76,7 @@ r2: voters=[n1,n3,n5,n6] report ---- over replicated: - r2:{b-c} [(n1,s1):1, (n3,s3):3, (n5,s5):5, (n6,s6):6] + r2:{b-c} [(n1,s1):1, (n3,s3):3, (n5,s5):5, (n6,s6):6] applying range default # It should also work when we don't have enough replicas. 
allocate @@ -86,7 +86,7 @@ r2: voters=[n1] report ---- under replicated: - r2:{b-c} [(n1,s1):1] + r2:{b-c} [(n1,s1):1] applying range default allocate r2: voters=[n1,n3,n5] @@ -109,9 +109,9 @@ configure report ---- under replicated: - r3:{c-d} [(n1,s1):1, (n3,s3):3, (n5,s5):5] + r3:{c-d} [(n1,s1):1, (n3,s3):3, (n5,s5):5] applying range system over replicated: - r1:{a-b} [(n2,s2):2, (n4,s4):4, (n6,s6):6] + r1:{a-b} [(n2,s2):2, (n4,s4):4, (n6,s6):6] applying num_replicas=1 configure [a,d): num_replicas=3 @@ -135,7 +135,7 @@ r2: voters=[n1,n3,n5] report ---- under replicated: - r2:{b-c} [(n1,s1):1, (n3,s3):3, (n5,s5):5] + r2:{b-c} [(n1,s1):1, (n3,s3):3, (n5,s5):5] applying num_replicas=6 num_voters=3 allocate r2: voters=[n1,n2,n3,n4,n5,n6] @@ -145,9 +145,9 @@ r2: voters=[n1,n2,n3,n4,n5,n6] report ---- under replicated: - r2:{b-c} [(n1,s1):1, (n2,s2):2, (n3,s3):3, (n4,s4):4, (n5,s5):5, (n6,s6):6] + r2:{b-c} [(n1,s1):1, (n2,s2):2, (n3,s3):3, (n4,s4):4, (n5,s5):5, (n6,s6):6] applying num_replicas=6 num_voters=3 over replicated: - r2:{b-c} [(n1,s1):1, (n2,s2):2, (n3,s3):3, (n4,s4):4, (n5,s5):5, (n6,s6):6] + r2:{b-c} [(n1,s1):1, (n2,s2):2, (n3,s3):3, (n4,s4):4, (n5,s5):5, (n6,s6):6] applying num_replicas=6 num_voters=3 allocate r2: voters=[n1,n3,n5] non-voters=[n2,n4,n6] @@ -178,7 +178,7 @@ configure report ---- violating constraints: - r2:{b-c} [(n1,s1):1, (n3,s3):3, (n5,s5):5] + r2:{b-c} [(n1,s1):1, (n3,s3):3, (n5,s5):5] applying constraints=[+region=us-central:2] allocate r2: voters=[n1,n3,n4] diff --git a/pkg/spanconfig/spanconfigreporter/testdata/constraint_conformance b/pkg/spanconfig/spanconfigreporter/testdata/constraint_conformance index 942091d86e23..105512830db0 100644 --- a/pkg/spanconfig/spanconfigreporter/testdata/constraint_conformance +++ b/pkg/spanconfig/spanconfigreporter/testdata/constraint_conformance @@ -24,7 +24,7 @@ r1: voters=[n1,n2,n4] report ---- violating constraints: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n4,s4):4] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n4,s4):4] applying constraints=[+region=us-west:3] # Pin replicas to two specific DCs. A conforming replica placement should show # up as such. @@ -53,7 +53,7 @@ r1: voters=[n1] non-voters=[n3] report ---- violating constraints: - r1:{a-b} [(n1,s1):1, (n3,s3):3NON_VOTER] + r1:{a-b} [(n1,s1):1, (n3,s3):3NON_VOTER] applying num_replicas=2 num_voters=1 constraints=[+dc=dc-a:1 +dc=dc-d:1] voter_constraints=[+dc=dc-a:1] allocate r1: voters=[n1] non-voters=[n4] @@ -76,7 +76,7 @@ r1: voters=[n1,n3,n5] report ---- violating constraints: - r1:{a-b} [(n1,s1):1, (n3,s3):3, (n5,s5):5] + r1:{a-b} [(n1,s1):1, (n3,s3):3, (n5,s5):5] applying constraints=[-region=us-west] allocate r1: voters=[n4,n5,n6] diff --git a/pkg/spanconfig/spanconfigreporter/testdata/joint_consensus b/pkg/spanconfig/spanconfigreporter/testdata/joint_consensus index bc0b4e09704c..a0e87cfff9a0 100644 --- a/pkg/spanconfig/spanconfigreporter/testdata/joint_consensus +++ b/pkg/spanconfig/spanconfigreporter/testdata/joint_consensus @@ -36,7 +36,7 @@ r1: voters=[n1,n2] voters-incoming=[n3] report ---- under replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_INCOMING] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_INCOMING] applying range default # Under-replication in the "new group". 
allocate @@ -46,7 +46,7 @@ r1: voters=[n1,n2] voters-outgoing=[n3] report ---- under replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_OUTGOING] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_OUTGOING] applying range default # Under-replication in the old group because 4 is dead. allocate @@ -56,7 +56,7 @@ r1: voters=[n1,n2] voters-outgoing=[n4] voters-incoming=[n3] report ---- under replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n4,s4):4VOTER_OUTGOING, (n3,s3):3VOTER_INCOMING] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n4,s4):4VOTER_OUTGOING, (n3,s3):3VOTER_INCOMING] applying range default # Unavailable in the new group (and also under-replicated), and also # over-replicated in the new group. @@ -67,11 +67,11 @@ r1: voters=[n1,n2] voters-outgoing=[n3] voters-incoming=[n4,n5] report ---- unavailable: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_OUTGOING, (n4,s4):4VOTER_INCOMING, (n5,s5):5VOTER_INCOMING] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_OUTGOING, (n4,s4):4VOTER_INCOMING, (n5,s5):5VOTER_INCOMING] applying range default under replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_OUTGOING, (n4,s4):4VOTER_INCOMING, (n5,s5):5VOTER_INCOMING] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_OUTGOING, (n4,s4):4VOTER_INCOMING, (n5,s5):5VOTER_INCOMING] applying range default over replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_OUTGOING, (n4,s4):4VOTER_INCOMING, (n5,s5):5VOTER_INCOMING] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_OUTGOING, (n4,s4):4VOTER_INCOMING, (n5,s5):5VOTER_INCOMING] applying range default # Over-replicated in the new group. allocate @@ -81,7 +81,7 @@ r1: voters=[n1,n2] voters-outgoing=[n3] voters-incoming=[n5,n6] report ---- over replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_OUTGOING, (n5,s5):5VOTER_INCOMING, (n6,s6):6VOTER_INCOMING] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3VOTER_OUTGOING, (n5,s5):5VOTER_INCOMING, (n6,s6):6VOTER_INCOMING] applying range default # Many learners. No problems, since learners don't count. @@ -101,4 +101,4 @@ r1: voters=[n1,n2] learners=[n3] report ---- under replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3LEARNER] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3LEARNER] applying range default diff --git a/pkg/spanconfig/spanconfigreporter/testdata/over_under_replicated b/pkg/spanconfig/spanconfigreporter/testdata/over_under_replicated index 5d73280c2b2d..3663de56825b 100644 --- a/pkg/spanconfig/spanconfigreporter/testdata/over_under_replicated +++ b/pkg/spanconfig/spanconfigreporter/testdata/over_under_replicated @@ -31,7 +31,7 @@ r1: voters=[n1,n2,n3,n4] report ---- over replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3, (n4,s4):4] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3, (n4,s4):4] applying range default # ----------------------------------------------------------------------------- # We have 1 or 2 replicas when we want 3, we're under replicated. 
@@ -42,7 +42,7 @@ r1: voters=[n1] report ---- under replicated: - r1:{a-b} [(n1,s1):1] + r1:{a-b} [(n1,s1):1] applying range default allocate r1: voters=[n1,n2] @@ -51,7 +51,7 @@ r1: voters=[n1,n2] report ---- under replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2] + r1:{a-b} [(n1,s1):1, (n2,s2):2] applying range default # ----------------------------------------------------------------------------- # We have the desired number of replicas, but one of them is on a dead node so @@ -67,7 +67,7 @@ r1: voters=[n1,n2,n3] report ---- under replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] applying range default # If we've lost quorum we're also unavailable. liveness @@ -78,9 +78,9 @@ n3: dead report ---- unavailable: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] applying range default under replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] applying range default liveness n2: live @@ -113,8 +113,8 @@ n2: dead report ---- unavailable: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] applying num_replicas=2 under replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] applying num_replicas=2 over replicated: - r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] + r1:{a-b} [(n1,s1):1, (n2,s2):2, (n3,s3):3] applying num_replicas=2
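The neededNonVoters argument above switches its sentinel from 0 to -1 because, unlike voters, expecting exactly 0 non-voters is legitimate. A minimal sketch of the resulting calling convention, assuming only the roachpb types visible in this patch (the helper names fullStatus and availableOnly are hypothetical):

```go
package example

import "github.com/cockroachdb/cockroach/pkg/roachpb"

// fullStatus (hypothetical) checks voters and non-voters alike: callers
// that care about non-voter replication pass the configured count through.
func fullStatus(
	replicas roachpb.ReplicaSet,
	isLive func(roachpb.ReplicaDescriptor) bool,
	conf roachpb.SpanConfig,
) roachpb.RangeStatusReport {
	return replicas.ReplicationStatus(
		isLive, int(conf.GetNumVoters()), int(conf.GetNumNonVoters()))
}

// availableOnly (hypothetical) mirrors CanMakeProgress: 0 still means
// "only availability of voters is of interest", while -1 is now the
// sentinel for "ignore non-voters", since 0 non-voters is a real
// expectation that must remain distinguishable.
func availableOnly(
	replicas roachpb.ReplicaSet, isLive func(roachpb.ReplicaDescriptor) bool,
) bool {
	return replicas.ReplicationStatus(
		isLive, 0 /* neededVoters */, -1 /* neededNonVoters */).Available
}
```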
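Because each reported range now carries the span config it was evaluated against, a consumer can explain non-conformance without a second config lookup. A sketch under those proto changes (summarizeConformance is a made-up name, and the output format is illustrative only):

```go
package example

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// summarizeConformance (hypothetical) walks the reshaped report. Each
// ConformanceReportedRange pairs the offending RangeDescriptor with the
// SpanConfig that applied to it, which is what lets the datadriven test
// print "... applying <config>" next to every range.
func summarizeConformance(report roachpb.SpanConfigConformanceReport) {
	for _, r := range report.ViolatingConstraints {
		fmt.Printf("r%d violates %v\n", r.RangeDescriptor.RangeID, r.Config.Constraints)
	}
	for _, r := range report.UnderReplicated {
		fmt.Printf("r%d under-replicated (want %d replicas)\n",
			r.RangeDescriptor.RangeID, r.Config.NumReplicas)
	}
	// Unavailable nodes are surfaced once, deduplicated across all ranges,
	// rather than repeated per range.
	fmt.Printf("unavailable nodes: %v\n", report.UnavailableNodeIDs)
}
```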
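On the authorization side, validateSpan admits only spans wholly contained in the tenant keyspace, so the widest request a secondary tenant can issue covers exactly its own prefix. A sketch of constructing such a request, assuming keys.MakeTenantPrefix; the helper name tenantWideConformanceRequest is hypothetical:

```go
package example

import (
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// tenantWideConformanceRequest (hypothetical) builds the widest
// SpanConfigConformanceRequest the new authorizer rule will admit for a
// secondary tenant: a single span covering the tenant's entire keyspace.
// Any span reaching past the tenant prefix fails validateSpan with an
// auth error.
func tenantWideConformanceRequest(
	tenID roachpb.TenantID,
) *roachpb.SpanConfigConformanceRequest {
	prefix := keys.MakeTenantPrefix(tenID)
	return &roachpb.SpanConfigConformanceRequest{
		Spans: []roachpb.Span{{Key: prefix, EndKey: prefix.PrefixEnd()}},
	}
}
```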
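Finally, the Iterate signature now threads a span through so implementations can bound their meta{1,2} scans. The test's mockCluster simply ignores the parameter; a filtering variant honoring the contract might look like the sketch below, which compares in raw-span terms via RSpan.AsRawSpanWithNoLocals (an assumption about the most convenient conversion, not code from this patch):

```go
package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// spanScopedIterate (hypothetical) shows what the added span parameter
// asks of rangedesciter implementations: surface only descriptors that
// overlap the requested span, rather than every range in the cluster.
func spanScopedIterate(
	_ context.Context, ranges []roachpb.RangeDescriptor, span roachpb.Span,
	fn func(...roachpb.RangeDescriptor) error,
) error {
	var descs []roachpb.RangeDescriptor
	for _, d := range ranges {
		// Descriptors carry resolved (RKey) bounds; convert to a raw Span
		// before testing for overlap with the requested keyspan.
		if d.RSpan().AsRawSpanWithNoLocals().Overlaps(span) {
			descs = append(descs, d)
		}
	}
	return fn(descs...)
}
```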