diff --git a/pkg/ccl/cliccl/cli_test.go b/pkg/ccl/cliccl/cli_test.go index 1b59600a30b1..ce8e51c5fb40 100644 --- a/pkg/ccl/cliccl/cli_test.go +++ b/pkg/ccl/cliccl/cli_test.go @@ -118,7 +118,7 @@ func Example_cclzone() { // gc: // ttlseconds: 90000 // num_replicas: 1 - // constraints: [us-east-1a, ssd] + // constraints: [+us-east-1a, +ssd] // zone get db.t.p0 // db.t@primary // range_min_bytes: 1048576 @@ -126,7 +126,7 @@ func Example_cclzone() { // gc: // ttlseconds: 90000 // num_replicas: 1 - // constraints: [us-east-1a, ssd] + // constraints: [+us-east-1a, +ssd] // zone get db.t // .default // range_min_bytes: 1048576 @@ -149,7 +149,7 @@ func Example_cclzone() { // gc: // ttlseconds: 90000 // num_replicas: 3 - // constraints: [us-east-1a, ssd] + // constraints: [+us-east-1a, +ssd] // zone get db.t.p1 // db.t.p1 // range_min_bytes: 1048576 @@ -157,7 +157,7 @@ func Example_cclzone() { // gc: // ttlseconds: 90000 // num_replicas: 3 - // constraints: [us-east-1a, ssd] + // constraints: [+us-east-1a, +ssd] // zone get db.t.p0 // db.t@primary // range_min_bytes: 1048576 @@ -165,7 +165,7 @@ func Example_cclzone() { // gc: // ttlseconds: 90000 // num_replicas: 1 - // constraints: [us-east-1a, ssd] + // constraints: [+us-east-1a, +ssd] // zone ls // .default // .liveness @@ -190,7 +190,7 @@ func Example_cclzone() { // gc: // ttlseconds: 90000 // num_replicas: 3 - // constraints: [us-east-1a, ssd] + // constraints: [+us-east-1a, +ssd] // zone ls // .default // .liveness diff --git a/pkg/ccl/partitionccl/partition_test.go b/pkg/ccl/partitionccl/partition_test.go index 338e4e9678c5..a71eaf281d08 100644 --- a/pkg/ccl/partitionccl/partition_test.go +++ b/pkg/ccl/partitionccl/partition_test.go @@ -33,6 +33,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/testutils/sqlutils" "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/util/hlc" @@ -169,16 +170,11 @@ func (t *partitioningTest) parse() error { } } - for _, constraintStr := range strings.Split(constraints, `,`) { - if constraintStr == "" { - continue - } - var c config.Constraint - if err := c.FromString(constraintStr); err != nil { - return errors.Wrapf(err, "parsing constraint: %s", constraintStr) - } - subzone.Config.Constraints.Constraints = append(subzone.Config.Constraints.Constraints, c) + var parsedConstraints config.ConstraintsList + if err := yaml.UnmarshalStrict([]byte("["+constraints+"]"), &parsedConstraints); err != nil { + return errors.Wrapf(err, "parsing constraints: %s", constraints) } + subzone.Config.Constraints = ([]config.Constraints)(parsedConstraints) t.parsed.subzones = append(t.parsed.subzones, subzone) } diff --git a/pkg/cli/cli_test.go b/pkg/cli/cli_test.go index 7949ad11e5c1..b71e240429c9 100644 --- a/pkg/cli/cli_test.go +++ b/pkg/cli/cli_test.go @@ -473,7 +473,7 @@ func Example_zone() { // gc: // ttlseconds: 90000 // num_replicas: 1 - // constraints: [us-east-1a, ssd] + // constraints: [+us-east-1a, +ssd] // zone ls // .default // .liveness @@ -505,7 +505,7 @@ func Example_zone() { // gc: // ttlseconds: 90000 // num_replicas: 1 - // constraints: [us-east-1a, ssd] + // constraints: [+us-east-1a, +ssd] // zone set system.descriptor --file=./testdata/zone_attrs.yaml // pq: cannot set zone configs for system config tables; try setting your config on the entire "system" database instead // zone set system.namespace --file=./testdata/zone_attrs.yaml @@ -518,7 +518,7 @@ func 
Example_zone() { // gc: // ttlseconds: 90000 // num_replicas: 3 - // constraints: [us-east-1a, ssd] + // constraints: [+us-east-1a, +ssd] // zone get system // system // range_min_bytes: 1048576 @@ -526,7 +526,7 @@ func Example_zone() { // gc: // ttlseconds: 90000 // num_replicas: 3 - // constraints: [us-east-1a, ssd] + // constraints: [+us-east-1a, +ssd] // zone rm system // CONFIGURE ZONE 1 // zone ls diff --git a/pkg/cli/testdata/zone_attrs.yaml b/pkg/cli/testdata/zone_attrs.yaml index 2f12a1cbe30d..7722b522409a 100644 --- a/pkg/cli/testdata/zone_attrs.yaml +++ b/pkg/cli/testdata/zone_attrs.yaml @@ -1,2 +1,2 @@ num_replicas: 1 -constraints: [us-east-1a,ssd] +constraints: [+us-east-1a,+ssd] diff --git a/pkg/config/zone.go b/pkg/config/zone.go index c3382d9768eb..9667fc3520f6 100644 --- a/pkg/config/zone.go +++ b/pkg/config/zone.go @@ -25,7 +25,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/pkg/errors" - yaml "gopkg.in/yaml.v2" ) // Several ranges outside of the SQL keyspace are given special names so they @@ -226,6 +225,9 @@ func (c Constraint) String() string { // FromString populates the constraint from the constraint shorthand notation. func (c *Constraint) FromString(short string) error { + if len(short) == 0 { + return fmt.Errorf("the empty string is not a valid constraint") + } switch short[0] { case '+': c.Type = Constraint_REQUIRED @@ -234,7 +236,7 @@ func (c *Constraint) FromString(short string) error { c.Type = Constraint_PROHIBITED short = short[1:] default: - c.Type = Constraint_POSITIVE + c.Type = Constraint_DEPRECATED_POSITIVE } parts := strings.Split(short, "=") if len(parts) == 1 { @@ -248,34 +250,6 @@ func (c *Constraint) FromString(short string) error { return nil } -var _ yaml.Marshaler = Constraints{} -var _ yaml.Unmarshaler = &Constraints{} - -// MarshalYAML implements yaml.Marshaler. -func (c Constraints) MarshalYAML() (interface{}, error) { - short := make([]string, len(c.Constraints)) - for i, c := range c.Constraints { - short[i] = c.String() - } - return short, nil -} - -// UnmarshalYAML implements yaml.Unmarshaler. -func (c *Constraints) UnmarshalYAML(unmarshal func(interface{}) error) error { - var shortConstraints []string - if err := unmarshal(&shortConstraints); err != nil { - return err - } - constraints := make([]Constraint, len(shortConstraints)) - for i, short := range shortConstraints { - if err := constraints[i].FromString(short); err != nil { - return err - } - } - c.Constraints = constraints - return nil -} - // minRangeMaxBytes is the minimum value for range max bytes. const minRangeMaxBytes = 64 << 10 // 64 KB @@ -321,8 +295,8 @@ func TestingSetDefaultZoneConfig(cfg ZoneConfig) func() { } } -// Validate returns an error if the ZoneConfig specifies a known-dangerous -// configuration. +// Validate returns an error if the ZoneConfig specifies a known-dangerous or +// disallowed configuration. 
func (z *ZoneConfig) Validate() error { for _, s := range z.Subzones { if err := s.Config.Validate(); err != nil { @@ -348,6 +322,43 @@ func (z *ZoneConfig) Validate() error { return fmt.Errorf("RangeMinBytes %d is greater than or equal to RangeMaxBytes %d", z.RangeMinBytes, z.RangeMaxBytes) } + + for _, constraints := range z.Constraints { + for _, constraint := range constraints.Constraints { + if constraint.Type == Constraint_DEPRECATED_POSITIVE { + return fmt.Errorf("constraints must either be required (prefixed with a '+') or " + + "prohibited (prefixed with a '-')") + } + } + } + + // We only need to further validate constraints if per-replica constraints + // are in use. The old style of constraints that apply to all replicas don't + // require validation. + if len(z.Constraints) > 1 || (len(z.Constraints) == 1 && z.Constraints[0].NumReplicas != 0) { + var numConstrainedRepls int64 + for _, constraints := range z.Constraints { + if constraints.NumReplicas <= 0 { + return fmt.Errorf("constraints must apply to at least one replica") + } + numConstrainedRepls += int64(constraints.NumReplicas) + for _, constraint := range constraints.Constraints { + if constraint.Type != Constraint_REQUIRED && constraints.NumReplicas != z.NumReplicas { + return fmt.Errorf( + "only required constraints (prefixed with a '+') can be applied to a subset of replicas") + } + if strings.Contains(constraint.Key, ":") || strings.Contains(constraint.Value, ":") { + return fmt.Errorf("the ':' character is not allowed in constraint keys or values") + } + } + } + // TODO(a-robinson): Relax this constraint, as discussed on #22412. + if numConstrainedRepls != int64(z.NumReplicas) { + return fmt.Errorf( + "the number of replicas specified in constraints (%d) does not equal the number of replicas configured for the zone (%d)", + numConstrainedRepls, z.NumReplicas) + } + } return nil } diff --git a/pkg/config/zone.pb.go b/pkg/config/zone.pb.go index 9b7e3536bb5b..e021f0677808 100644 --- a/pkg/config/zone.pb.go +++ b/pkg/config/zone.pb.go @@ -21,24 +21,24 @@ var _ = math.Inf type Constraint_Type int32 const ( - // POSITIVE will attempt to ensure all stores the replicas are on has this - // constraint. - Constraint_POSITIVE Constraint_Type = 0 - // REQUIRED is like POSITIVE except replication will fail if not satisfied. + // DEPRECATED_POSITIVE has no effect on a replica's placement. + Constraint_DEPRECATED_POSITIVE Constraint_Type = 0 + // REQUIRED ensures all replicas are placed on stores that match the + // constraint. Replication will fail if there aren't any such stores. Constraint_REQUIRED Constraint_Type = 1 // PROHIBITED will prevent replicas from having this key, value. Constraint_PROHIBITED Constraint_Type = 2 ) var Constraint_Type_name = map[int32]string{ - 0: "POSITIVE", + 0: "DEPRECATED_POSITIVE", 1: "REQUIRED", 2: "PROHIBITED", } var Constraint_Type_value = map[string]int32{ - "POSITIVE": 0, - "REQUIRED": 1, - "PROHIBITED": 2, + "DEPRECATED_POSITIVE": 0, + "REQUIRED": 1, + "PROHIBITED": 2, } func (x Constraint_Type) Enum() *Constraint_Type { @@ -92,6 +92,12 @@ func (*Constraint) Descriptor() ([]byte, []int) { return fileDescriptorZone, []i // Constraints is a collection of constraints. type Constraints struct { + // The number of replicas that should abide by the constraints. If left + // unspecified (i.e. set to 0), the constraints will be assumed to apply + // to all replicas of the range. + // As of v2.0, only REQUIRED constraints are allowed when num_replicas is + // set to a non-zero value. 
+ NumReplicas int32 `protobuf:"varint,7,opt,name=num_replicas,json=numReplicas" json:"num_replicas"` Constraints []Constraint `protobuf:"bytes,6,rep,name=constraints" json:"constraints"` } @@ -112,7 +118,11 @@ type ZoneConfig struct { // Constraints constrains which stores the replicas can be stored on. The // order in which the constraints are stored is arbitrary and may change. // https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160706_expressive_zone_config.md#constraint-system - Constraints Constraints `protobuf:"bytes,6,opt,name=constraints" json:"constraints" yaml:"constraints,flow"` + // + // NOTE: The sum of the num_replicas fields of the Constraints must add up to + // ZoneConfig.num_replicas, or there must be no more than a single Constraints + // field with num_replicas set to 0. + Constraints []Constraints `protobuf:"bytes,6,rep,name=constraints" json:"constraints" yaml:"constraints,flow"` // Subzones stores config overrides for "subzones", each of which represents // either a SQL table index or a partition of a SQL table index. Subzones are // not applicable when the zone does not represent a SQL table (i.e., when the @@ -254,6 +264,9 @@ func (this *Constraints) Equal(that interface{}) bool { } else if this == nil { return false } + if this.NumReplicas != that1.NumReplicas { + return false + } if len(this.Constraints) != len(that1.Constraints) { return false } @@ -295,9 +308,14 @@ func (this *ZoneConfig) Equal(that interface{}) bool { if this.NumReplicas != that1.NumReplicas { return false } - if !this.Constraints.Equal(&that1.Constraints) { + if len(this.Constraints) != len(that1.Constraints) { return false } + for i := range this.Constraints { + if !this.Constraints[i].Equal(&that1.Constraints[i]) { + return false + } + } if len(this.Subzones) != len(that1.Subzones) { return false } @@ -453,6 +471,9 @@ func (m *Constraints) MarshalTo(dAtA []byte) (int, error) { i += n } } + dAtA[i] = 0x38 + i++ + i = encodeVarintZone(dAtA, i, uint64(m.NumReplicas)) return i, nil } @@ -488,14 +509,18 @@ func (m *ZoneConfig) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x28 i++ i = encodeVarintZone(dAtA, i, uint64(m.NumReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintZone(dAtA, i, uint64(m.Constraints.Size())) - n2, err := m.Constraints.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.Constraints) > 0 { + for _, msg := range m.Constraints { + dAtA[i] = 0x32 + i++ + i = encodeVarintZone(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - i += n2 if len(m.SubzoneSpans) > 0 { for _, msg := range m.SubzoneSpans { dAtA[i] = 0x3a @@ -548,11 +573,11 @@ func (m *Subzone) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintZone(dAtA, i, uint64(m.Config.Size())) - n3, err := m.Config.MarshalTo(dAtA[i:]) + n2, err := m.Config.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n2 return i, nil } @@ -598,6 +623,199 @@ func encodeVarintZone(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func NewPopulatedGCPolicy(r randyZone, easy bool) *GCPolicy { + this := &GCPolicy{} + this.TTLSeconds = int32(r.Int31()) + if r.Intn(2) == 0 { + this.TTLSeconds *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedConstraint(r randyZone, easy bool) *Constraint { + this := &Constraint{} + this.Type = Constraint_Type([]int32{0, 1, 2}[r.Intn(3)]) + this.Key = string(randStringZone(r)) + this.Value = 
string(randStringZone(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedConstraints(r randyZone, easy bool) *Constraints { + this := &Constraints{} + if r.Intn(10) != 0 { + v1 := r.Intn(5) + this.Constraints = make([]Constraint, v1) + for i := 0; i < v1; i++ { + v2 := NewPopulatedConstraint(r, easy) + this.Constraints[i] = *v2 + } + } + this.NumReplicas = int32(r.Int31()) + if r.Intn(2) == 0 { + this.NumReplicas *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedZoneConfig(r randyZone, easy bool) *ZoneConfig { + this := &ZoneConfig{} + this.RangeMinBytes = int64(r.Int63()) + if r.Intn(2) == 0 { + this.RangeMinBytes *= -1 + } + this.RangeMaxBytes = int64(r.Int63()) + if r.Intn(2) == 0 { + this.RangeMaxBytes *= -1 + } + v3 := NewPopulatedGCPolicy(r, easy) + this.GC = *v3 + this.NumReplicas = int32(r.Int31()) + if r.Intn(2) == 0 { + this.NumReplicas *= -1 + } + if r.Intn(10) != 0 { + v4 := r.Intn(5) + this.Constraints = make([]Constraints, v4) + for i := 0; i < v4; i++ { + v5 := NewPopulatedConstraints(r, easy) + this.Constraints[i] = *v5 + } + } + if r.Intn(10) != 0 { + v6 := r.Intn(5) + this.SubzoneSpans = make([]SubzoneSpan, v6) + for i := 0; i < v6; i++ { + v7 := NewPopulatedSubzoneSpan(r, easy) + this.SubzoneSpans[i] = *v7 + } + } + if r.Intn(10) == 0 { + v8 := r.Intn(5) + this.Subzones = make([]Subzone, v8) + for i := 0; i < v8; i++ { + v9 := NewPopulatedSubzone(r, easy) + this.Subzones[i] = *v9 + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedSubzone(r randyZone, easy bool) *Subzone { + this := &Subzone{} + this.IndexID = uint32(r.Uint32()) + this.PartitionName = string(randStringZone(r)) + v10 := NewPopulatedZoneConfig(r, easy) + this.Config = *v10 + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedSubzoneSpan(r randyZone, easy bool) *SubzoneSpan { + this := &SubzoneSpan{} + if r.Intn(10) != 0 { + v11 := r.Intn(100) + this.Key = make(github_com_cockroachdb_cockroach_pkg_roachpb.Key, v11) + for i := 0; i < v11; i++ { + this.Key[i] = byte(r.Intn(256)) + } + } + if r.Intn(10) != 0 { + v12 := r.Intn(100) + this.EndKey = make(github_com_cockroachdb_cockroach_pkg_roachpb.Key, v12) + for i := 0; i < v12; i++ { + this.EndKey[i] = byte(r.Intn(256)) + } + } + this.SubzoneIndex = int32(r.Int31()) + if r.Intn(2) == 0 { + this.SubzoneIndex *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyZone interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneZone(r randyZone) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringZone(r randyZone) string { + v13 := r.Intn(100) + tmps := make([]rune, v13) + for i := 0; i < v13; i++ { + tmps[i] = randUTF8RuneZone(r) + } + return string(tmps) +} +func randUnrecognizedZone(r randyZone, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldZone(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldZone(dAtA []byte, r randyZone, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateZone(dAtA, uint64(key)) + v14 := r.Int63() + if r.Intn(2) == 0 { + v14 *= -1 + } + dAtA = encodeVarintPopulateZone(dAtA, 
uint64(v14)) + case 1: + dAtA = encodeVarintPopulateZone(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateZone(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateZone(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateZone(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateZone(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} func (m *GCPolicy) Size() (n int) { var l int _ = l @@ -625,6 +843,7 @@ func (m *Constraints) Size() (n int) { n += 1 + l + sovZone(uint64(l)) } } + n += 1 + sovZone(uint64(m.NumReplicas)) return n } @@ -636,8 +855,12 @@ func (m *ZoneConfig) Size() (n int) { l = m.GC.Size() n += 1 + l + sovZone(uint64(l)) n += 1 + sovZone(uint64(m.NumReplicas)) - l = m.Constraints.Size() - n += 1 + l + sovZone(uint64(l)) + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovZone(uint64(l)) + } + } if len(m.SubzoneSpans) > 0 { for _, e := range m.SubzoneSpans { l = e.Size() @@ -948,6 +1171,25 @@ func (m *Constraints) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumReplicas", wireType) + } + m.NumReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowZone + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipZone(dAtA[iNdEx:]) @@ -1111,7 +1353,8 @@ func (m *ZoneConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Constraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Constraints = append(m.Constraints, Constraints{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1565,50 +1808,52 @@ var ( func init() { proto.RegisterFile("config/zone.proto", fileDescriptorZone) } var fileDescriptorZone = []byte{ - // 710 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x3b, 0x73, 0xd3, 0x4a, - 0x14, 0x80, 0x2d, 0x3f, 0x62, 0xdd, 0x63, 0x3b, 0xf1, 0xdd, 0x7b, 0x27, 0x57, 0x37, 0x80, 0x6d, - 0x54, 0x99, 0x97, 0xcd, 0x18, 0x86, 0xc2, 0xcc, 0x50, 0xc8, 0x76, 0x82, 0x08, 0x21, 0x41, 0x36, - 0xcc, 0x90, 0x46, 0xc8, 0xf2, 0x46, 0xd1, 0xc4, 0xde, 0xd5, 0x58, 0x32, 0x44, 0xfc, 0x0a, 0x4a, - 0xca, 0x30, 0xc3, 0xbf, 0xe0, 0x0f, 0xa4, 0xa4, 0x61, 0x86, 0xca, 0x03, 0xa6, 0xa1, 0xa6, 0xa4, - 0x62, 0xb4, 0xbb, 0x7e, 0x24, 0x84, 0x14, 0x74, 0x7b, 0x5e, 0xdf, 0x79, 0xec, 0x39, 0xf0, 0xb7, - 0x4d, 0xc9, 0x9e, 0xeb, 0x54, 0x5f, 0x51, 0x82, 0x2b, 0xde, 0x90, 0x06, 0x14, 0xe5, 0x6d, 0x6a, - 0x1f, 0x0c, 0xa9, 0x65, 0xef, 0x57, 0xb8, 0x71, 0xed, 0x5f, 0x87, 0x3a, 0x94, 0x19, 0xab, 0xd1, - 0x8b, 0xfb, 0xa9, 0x2d, 0x90, 0x37, 0x1a, 0x3b, 0xb4, 0xef, 0xda, 0x21, 0xba, 0x05, 0x99, 0x20, - 0xe8, 0x9b, 0x3e, 0xb6, 0x29, 0xe9, 0xf9, 0x8a, 0x54, 0x92, 0xca, 0x29, 0x0d, 0x1d, 0x8f, 0x8b, - 0xb1, 0xc9, 
0xb8, 0x08, 0x9d, 0xce, 0xc3, 0x36, 0xb7, 0x18, 0x10, 0x04, 0x7d, 0xf1, 0xae, 0x27, - 0xbf, 0x1d, 0x15, 0x25, 0xf5, 0xbd, 0x04, 0xd0, 0xa0, 0xc4, 0x0f, 0x86, 0x96, 0x4b, 0x02, 0x74, - 0x17, 0x92, 0x41, 0xe8, 0x61, 0x86, 0x58, 0xae, 0x5d, 0xae, 0x9c, 0x2e, 0xa6, 0x32, 0xf7, 0xad, - 0x74, 0x42, 0x0f, 0x6b, 0xc9, 0x28, 0x8b, 0xc1, 0x82, 0xd0, 0x2a, 0x24, 0x0e, 0x70, 0xa8, 0xc4, - 0x4b, 0x52, 0xf9, 0x2f, 0x61, 0x88, 0x14, 0x68, 0x0d, 0x52, 0x2f, 0xac, 0xfe, 0x08, 0x2b, 0x89, - 0x05, 0x0b, 0x57, 0xa9, 0x35, 0x48, 0x46, 0x1c, 0x94, 0x05, 0x79, 0x67, 0xbb, 0xad, 0x77, 0xf4, - 0xa7, 0xad, 0x7c, 0x2c, 0x92, 0x8c, 0xd6, 0xe3, 0x27, 0xba, 0xd1, 0x6a, 0xe6, 0x25, 0xb4, 0x0c, - 0xb0, 0x63, 0x6c, 0xdf, 0xd7, 0x35, 0xbd, 0xd3, 0x6a, 0xe6, 0xe3, 0x75, 0xf9, 0xcd, 0x51, 0x31, - 0xc6, 0xaa, 0x7f, 0x06, 0x99, 0x79, 0x41, 0x3e, 0x6a, 0x42, 0xc6, 0x9e, 0x8b, 0xca, 0x52, 0x29, - 0x51, 0xce, 0xd4, 0x2e, 0x9e, 0xd7, 0x84, 0x28, 0x66, 0x31, 0x4c, 0x0c, 0xe6, 0x6d, 0x12, 0x60, - 0x97, 0x12, 0xdc, 0x60, 0x21, 0x68, 0x1d, 0x56, 0x86, 0x16, 0x71, 0xb0, 0x39, 0x70, 0x89, 0xd9, - 0x0d, 0x03, 0xec, 0xb3, 0x3e, 0x13, 0x5a, 0x21, 0x02, 0x7c, 0x1f, 0x17, 0x57, 0x43, 0x6b, 0xd0, - 0xaf, 0xab, 0xa7, 0x9c, 0x54, 0x23, 0xc7, 0x34, 0x5b, 0x2e, 0xd1, 0x22, 0x79, 0x81, 0x63, 0x1d, - 0x0a, 0x4e, 0xe2, 0x1c, 0xce, 0xd4, 0x69, 0xc6, 0xb1, 0x0e, 0x39, 0xe7, 0x0e, 0xc4, 0x1d, 0x5b, - 0x49, 0x96, 0xa4, 0x72, 0xa6, 0xb6, 0xf6, 0x6b, 0x87, 0xd3, 0xd5, 0xd0, 0x40, 0x6c, 0x41, 0x7c, - 0xa3, 0x61, 0xc4, 0x1d, 0x1b, 0xdd, 0x83, 0x2c, 0x19, 0x0d, 0xcc, 0x21, 0xf6, 0xfa, 0xae, 0x6d, - 0xf9, 0x4a, 0x8a, 0xed, 0xca, 0x05, 0x91, 0xfc, 0x1f, 0x9e, 0x7c, 0xd1, 0x43, 0x35, 0x32, 0x64, - 0x34, 0x30, 0x84, 0x84, 0x9e, 0x9f, 0x1e, 0x71, 0x54, 0xc0, 0xa5, 0xf3, 0x46, 0xec, 0x6b, 0x45, - 0x41, 0xff, 0x8f, 0xd3, 0x17, 0xe2, 0xaf, 0xef, 0xf5, 0xe9, 0x4b, 0xf5, 0xc4, 0xf8, 0x51, 0x07, - 0x72, 0xfe, 0xa8, 0x1b, 0x5d, 0x84, 0xe9, 0x7b, 0x16, 0xf1, 0x95, 0x34, 0xfb, 0xc6, 0x33, 0x72, - 0xb4, 0xb9, 0x5b, 0xdb, 0xb3, 0x88, 0x96, 0x17, 0x39, 0x64, 0x9e, 0xe3, 0x86, 0x6a, 0x64, 0xfd, - 0xb9, 0xd9, 0x47, 0x1b, 0x20, 0x0b, 0xd9, 0x57, 0x64, 0x06, 0xfc, 0xff, 0xb7, 0xc0, 0x33, 0x60, - 0xb3, 0x60, 0xbe, 0x1d, 0x0f, 0x92, 0xb2, 0x94, 0x8f, 0xab, 0xef, 0x24, 0x48, 0x8b, 0x18, 0x74, - 0x15, 0x64, 0x97, 0xf4, 0xf0, 0xa1, 0xe9, 0xf6, 0xd8, 0xf5, 0xe4, 0xb4, 0x15, 0x31, 0xfa, 0xb4, - 0x1e, 0xe9, 0xf5, 0xa6, 0x91, 0x66, 0x0e, 0x7a, 0x0f, 0x5d, 0x83, 0x65, 0xcf, 0x1a, 0x06, 0x6e, - 0xe0, 0x52, 0x62, 0x12, 0x6b, 0x80, 0x4f, 0xdc, 0x4c, 0x6e, 0x66, 0x7b, 0x64, 0x0d, 0x30, 0xaa, - 0xc3, 0x12, 0x2f, 0x8f, 0x2d, 0xca, 0x99, 0xfb, 0x3c, 0xdf, 0x53, 0x81, 0x10, 0x11, 0x62, 0x95, - 0x3f, 0x4a, 0x90, 0x59, 0x98, 0x15, 0x5a, 0xe7, 0x77, 0x1a, 0x55, 0x99, 0xd5, 0x6e, 0xff, 0x18, - 0x17, 0x6f, 0x3a, 0x6e, 0xb0, 0x3f, 0xea, 0x56, 0x6c, 0x3a, 0xa8, 0xce, 0xe0, 0xbd, 0xee, 0xfc, - 0x5d, 0xf5, 0x0e, 0x9c, 0x2a, 0x7b, 0x79, 0xdd, 0xca, 0x26, 0x0e, 0xf9, 0x5d, 0x6f, 0x41, 0x1a, - 0x93, 0x9e, 0x39, 0xbd, 0xf9, 0x3f, 0x65, 0x2d, 0x61, 0xd2, 0xdb, 0xc4, 0x21, 0xba, 0x32, 0xff, - 0x78, 0x36, 0x28, 0xd6, 0x6f, 0x4a, 0x74, 0x34, 0xfd, 0x4d, 0x36, 0x4b, 0xde, 0x97, 0x56, 0x3a, - 0xfe, 0x52, 0x88, 0x1d, 0x4f, 0x0a, 0xd2, 0x87, 0x49, 0x41, 0xfa, 0x34, 0x29, 0x48, 0x9f, 0x27, - 0x05, 0xe9, 0xf5, 0xd7, 0x42, 0x6c, 0x57, 0xf4, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0xd3, 0xfe, - 0x06, 0x59, 0x60, 0x05, 0x00, 0x00, + // 740 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x3f, 0x73, 0xd3, 0x48, + 0x14, 0xf7, 0xda, 0x4e, 0xac, 0x7b, 0xb6, 0x13, 
0xdf, 0xe6, 0x26, 0xf1, 0xe5, 0xee, 0x2c, 0x9f, + 0x9a, 0xf3, 0xfd, 0xb3, 0x6f, 0x72, 0x0c, 0x85, 0x19, 0x98, 0x41, 0xb6, 0x13, 0x44, 0x08, 0x31, + 0xb2, 0xa1, 0x48, 0x23, 0x64, 0x79, 0xa3, 0x68, 0x62, 0xaf, 0x34, 0x96, 0x0c, 0x11, 0x1f, 0x80, + 0x9a, 0x92, 0x32, 0x0d, 0x33, 0x7c, 0x04, 0x3e, 0x42, 0x86, 0x8a, 0x12, 0x1a, 0x0f, 0x98, 0x86, + 0x9a, 0x92, 0x8a, 0xd1, 0x6a, 0x6d, 0x29, 0x21, 0x49, 0x41, 0xb7, 0xbb, 0xef, 0xf7, 0x7e, 0xef, + 0xbd, 0xdf, 0x7b, 0x6f, 0xe1, 0x47, 0xc3, 0xa6, 0xfb, 0x96, 0x59, 0x7b, 0x62, 0x53, 0x52, 0x75, + 0x46, 0xb6, 0x67, 0xe3, 0x82, 0x61, 0x1b, 0x87, 0x23, 0x5b, 0x37, 0x0e, 0xaa, 0xa1, 0x71, 0xfd, + 0x27, 0xd3, 0x36, 0x6d, 0x66, 0xac, 0x05, 0xa7, 0x10, 0x27, 0x29, 0x20, 0x6c, 0x35, 0xda, 0xf6, + 0xc0, 0x32, 0x7c, 0xfc, 0x3f, 0x64, 0x3d, 0x6f, 0xa0, 0xb9, 0xc4, 0xb0, 0x69, 0xdf, 0x2d, 0xa2, + 0x32, 0xaa, 0x2c, 0xc8, 0xf8, 0x64, 0x22, 0x26, 0xa6, 0x13, 0x11, 0xba, 0xdd, 0x3b, 0x9d, 0xd0, + 0xa2, 0x82, 0xe7, 0x0d, 0xf8, 0xb9, 0x2e, 0xbc, 0x3a, 0x16, 0xd1, 0xa7, 0x63, 0x11, 0x49, 0xaf, + 0x11, 0x40, 0xc3, 0xa6, 0xae, 0x37, 0xd2, 0x2d, 0xea, 0xe1, 0x6b, 0x90, 0xf6, 0x7c, 0x87, 0x30, + 0x9a, 0xa5, 0x8d, 0xdf, 0xab, 0x67, 0x13, 0xaa, 0x46, 0xd8, 0x6a, 0xd7, 0x77, 0x88, 0x9c, 0x0e, + 0x22, 0xa9, 0xcc, 0x09, 0xaf, 0x42, 0xea, 0x90, 0xf8, 0xc5, 0x64, 0x19, 0x55, 0x7e, 0xe0, 0x86, + 0xe0, 0x01, 0xaf, 0xc3, 0xc2, 0x23, 0x7d, 0x30, 0x26, 0xc5, 0x54, 0xcc, 0x12, 0x3e, 0x49, 0xd7, + 0x21, 0x1d, 0xf0, 0xe0, 0x35, 0x58, 0x69, 0xb6, 0xda, 0x6a, 0xab, 0x71, 0xb3, 0xdb, 0x6a, 0x6a, + 0xed, 0xdd, 0x8e, 0xd2, 0x55, 0x1e, 0xb4, 0x0a, 0x09, 0x9c, 0x03, 0x41, 0x6d, 0xdd, 0xbb, 0xaf, + 0xa8, 0xad, 0x66, 0x01, 0xe1, 0x25, 0x80, 0xb6, 0xba, 0x7b, 0x4b, 0x91, 0x95, 0x6e, 0xab, 0x59, + 0x48, 0xd6, 0x73, 0xcf, 0x8f, 0xc5, 0xc4, 0xbc, 0x98, 0xa7, 0x08, 0xb2, 0x51, 0x82, 0x2e, 0x6e, + 0x42, 0xd6, 0x88, 0xae, 0xc5, 0xc5, 0x72, 0xaa, 0x92, 0xdd, 0xf8, 0xf5, 0xb2, 0xa2, 0x78, 0x72, + 0x71, 0x37, 0xfc, 0x07, 0xe4, 0xe8, 0x78, 0xa8, 0x8d, 0x88, 0x33, 0xb0, 0x0c, 0xdd, 0x2d, 0x66, + 0x98, 0xc4, 0x1c, 0x48, 0xc7, 0x43, 0x95, 0x1b, 0x62, 0xaa, 0xbe, 0x48, 0x03, 0xec, 0xd9, 0x94, + 0x34, 0x18, 0x3f, 0xde, 0x84, 0xe5, 0x91, 0x4e, 0x4d, 0xa2, 0x0d, 0x2d, 0xaa, 0xf5, 0x7c, 0x8f, + 0xb8, 0x4c, 0xa4, 0x94, 0x5c, 0x0a, 0x48, 0x3e, 0x4f, 0xc4, 0x55, 0x5f, 0x1f, 0x0e, 0xea, 0xd2, + 0x19, 0x90, 0xa4, 0xe6, 0xd9, 0xcb, 0x8e, 0x45, 0xe5, 0xe0, 0x1e, 0xe3, 0xd1, 0x8f, 0x38, 0x4f, + 0xea, 0x12, 0x9e, 0x19, 0x68, 0xce, 0xa3, 0x1f, 0x85, 0x3c, 0x57, 0x21, 0x69, 0x1a, 0xc5, 0x74, + 0x19, 0x55, 0xb2, 0x1b, 0xeb, 0xdf, 0xca, 0x31, 0x9b, 0x2d, 0x19, 0xf8, 0x18, 0x25, 0xb7, 0x1a, + 0x6a, 0xd2, 0x34, 0xf0, 0x8d, 0x33, 0x4a, 0x2c, 0x30, 0x25, 0x7e, 0xe1, 0xc1, 0x57, 0xc2, 0xe0, + 0x71, 0x84, 0x74, 0x4a, 0x20, 0xfc, 0xf0, 0xbc, 0x7e, 0xfc, 0x76, 0x59, 0x3f, 0x5c, 0x59, 0xe4, + 0xec, 0x6b, 0x21, 0x7b, 0xcc, 0xff, 0x9f, 0xfd, 0x81, 0xfd, 0x58, 0x3a, 0xdd, 0xab, 0x2e, 0xe4, + 0xdd, 0x71, 0x2f, 0x58, 0x29, 0xcd, 0x75, 0x74, 0x1a, 0x34, 0xeb, 0x82, 0x18, 0x9d, 0x10, 0xd6, + 0x71, 0x74, 0x2a, 0x17, 0x78, 0x0c, 0x21, 0x8c, 0xf1, 0xaf, 0xa4, 0xe6, 0xdc, 0xc8, 0xec, 0xe2, + 0x2d, 0x10, 0xf8, 0xdd, 0x2d, 0x0a, 0x8c, 0xf0, 0xe7, 0x0b, 0x09, 0xcf, 0x21, 0x9b, 0x3b, 0x47, + 0x13, 0x72, 0x3b, 0x2d, 0xa0, 0x42, 0x52, 0x7a, 0x89, 0x20, 0xc3, 0xfd, 0xf0, 0x5f, 0x20, 0x58, + 0xb4, 0x4f, 0x8e, 0x34, 0xab, 0xcf, 0xd6, 0x2f, 0x2f, 0x2f, 0x73, 0xf9, 0x33, 0x4a, 0xf0, 0xae, + 0x34, 0xd5, 0x0c, 0x03, 0x28, 0x7d, 0xfc, 0x37, 0x2c, 0x39, 0xfa, 0xc8, 0xb3, 0x3c, 0xcb, 0xa6, + 0x1a, 0xd5, 0x87, 0xe4, 0xd4, 0xd2, 0xe5, 0xe7, 0xb6, 0xbb, 0xfa, 0x90, 
0xe0, 0x3a, 0x2c, 0x86, + 0x29, 0xb2, 0x61, 0x39, 0x77, 0x01, 0xa2, 0x59, 0xe5, 0x14, 0xdc, 0x23, 0x36, 0xd2, 0xef, 0x10, + 0x64, 0x63, 0x9a, 0xe1, 0xcd, 0x70, 0xd9, 0x83, 0x4c, 0x73, 0xf2, 0x95, 0x2f, 0x13, 0xf1, 0x3f, + 0xd3, 0xf2, 0x0e, 0xc6, 0xbd, 0xaa, 0x61, 0x0f, 0x6b, 0xf3, 0x00, 0xfd, 0x5e, 0x74, 0xae, 0x39, + 0x87, 0x66, 0x8d, 0x9d, 0x9c, 0x5e, 0x75, 0x9b, 0xf8, 0xe1, 0xe7, 0xb0, 0x03, 0x19, 0x42, 0xfb, + 0xda, 0xec, 0xe3, 0xf8, 0x5e, 0xae, 0x45, 0x42, 0xfb, 0xdb, 0xc4, 0xc7, 0x7f, 0x46, 0x03, 0xc0, + 0xc4, 0x62, 0x35, 0xcf, 0xb6, 0x75, 0xd6, 0x55, 0xa6, 0x67, 0x54, 0x9b, 0x5c, 0x3e, 0xf9, 0x50, + 0x4a, 0x9c, 0x4c, 0x4b, 0xe8, 0xcd, 0xb4, 0x84, 0xde, 0x4e, 0x4b, 0xe8, 0xfd, 0xb4, 0x84, 0x9e, + 0x7d, 0x2c, 0x25, 0xf6, 0xb8, 0x0e, 0x5f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x8d, 0x99, 0x9b, + 0xad, 0x05, 0x00, 0x00, } diff --git a/pkg/config/zone.proto b/pkg/config/zone.proto index eb82067d8fc7..f09a4d938caa 100644 --- a/pkg/config/zone.proto +++ b/pkg/config/zone.proto @@ -26,6 +26,7 @@ import "gogoproto/gogo.proto"; // and TTL or a union. message GCPolicy { option (gogoproto.equal) = true; + option (gogoproto.populate) = true; // TTLSeconds specifies the maximum age of a value before it's // garbage collected. Only older versions of values are garbage @@ -37,12 +38,13 @@ message GCPolicy { message Constraint { option (gogoproto.equal) = true; option (gogoproto.goproto_stringer) = false; + option (gogoproto.populate) = true; enum Type { - // POSITIVE will attempt to ensure all stores the replicas are on has this - // constraint. - POSITIVE = 0; - // REQUIRED is like POSITIVE except replication will fail if not satisfied. + // DEPRECATED_POSITIVE has no effect on a replica's placement. + DEPRECATED_POSITIVE = 0; + // REQUIRED ensures all replicas are placed on stores that match the + // constraint. Replication will fail if there aren't any such stores. REQUIRED = 1; // PROHIBITED will prevent replicas from having this key, value. PROHIBITED = 2; @@ -57,6 +59,14 @@ message Constraint { // Constraints is a collection of constraints. message Constraints { option (gogoproto.equal) = true; + option (gogoproto.populate) = true; + + // The number of replicas that should abide by the constraints. If left + // unspecified (i.e. set to 0), the constraints will be assumed to apply + // to all replicas of the range. + // As of v2.0, only REQUIRED constraints are allowed when num_replicas is + // set to a non-zero value. + optional int32 num_replicas = 7 [(gogoproto.nullable) = false]; repeated Constraint constraints = 6 [(gogoproto.nullable) = false]; } @@ -64,6 +74,7 @@ message Constraints { // ZoneConfig holds configuration that applies to one or more ranges. message ZoneConfig { option (gogoproto.equal) = true; + option (gogoproto.populate) = true; reserved 1; optional int64 range_min_bytes = 2 [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"range_min_bytes\""]; @@ -76,7 +87,11 @@ message ZoneConfig { // Constraints constrains which stores the replicas can be stored on. The // order in which the constraints are stored is arbitrary and may change. 
// https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160706_expressive_zone_config.md#constraint-system - optional Constraints constraints = 6 [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"constraints,flow\""]; + // + // NOTE: The sum of the num_replicas fields of the Constraints must add up to + // ZoneConfig.num_replicas, or there must be no more than a single Constraints + // field with num_replicas set to 0. + repeated Constraints constraints = 6 [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"constraints,flow\""]; // Subzones stores config overrides for "subzones", each of which represents // either a SQL table index or a partition of a SQL table index. Subzones are @@ -95,6 +110,7 @@ message ZoneConfig { message Subzone { option (gogoproto.equal) = true; + option (gogoproto.populate) = true; // IndexID is the ID of the SQL table index that the subzone represents. It // must always be set even though partition names are unique across all of a @@ -112,6 +128,7 @@ message Subzone { message SubzoneSpan { option (gogoproto.equal) = true; + option (gogoproto.populate) = true; // Key stores a key suffix that represents the inclusive lower bound for this // span. The SQL table prefix, like /Table/51/, is omitted. diff --git a/pkg/config/zone_test.go b/pkg/config/zone_test.go index 59f30650f489..96ece251731f 100644 --- a/pkg/config/zone_test.go +++ b/pkg/config/zone_test.go @@ -12,15 +12,17 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. -package config_test +package config import ( "fmt" + "math/rand" + "reflect" "testing" - "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/timeutil" proto "github.com/gogo/protobuf/proto" yaml "gopkg.in/yaml.v2" ) @@ -29,45 +31,99 @@ func TestZoneConfigValidate(t *testing.T) { defer leaktest.AfterTest(t)() testCases := []struct { - cfg config.ZoneConfig + cfg ZoneConfig expected string }{ { - config.ZoneConfig{}, + ZoneConfig{}, "at least one replica is required", }, { - config.ZoneConfig{ + ZoneConfig{ NumReplicas: 2, }, "at least 3 replicas are required for multi-replica configurations", }, { - config.ZoneConfig{ + ZoneConfig{ NumReplicas: 1, }, "RangeMaxBytes 0 less than minimum allowed", }, { - config.ZoneConfig{ + ZoneConfig{ NumReplicas: 1, - RangeMaxBytes: config.DefaultZoneConfig().RangeMaxBytes, + RangeMaxBytes: DefaultZoneConfig().RangeMaxBytes, }, "", }, { - config.ZoneConfig{ + ZoneConfig{ NumReplicas: 1, - RangeMinBytes: config.DefaultZoneConfig().RangeMaxBytes, - RangeMaxBytes: config.DefaultZoneConfig().RangeMaxBytes, + RangeMinBytes: DefaultZoneConfig().RangeMaxBytes, + RangeMaxBytes: DefaultZoneConfig().RangeMaxBytes, }, "is greater than or equal to RangeMaxBytes", }, + { + ZoneConfig{ + NumReplicas: 1, + RangeMaxBytes: DefaultZoneConfig().RangeMaxBytes, + Constraints: []Constraints{ + {Constraints: []Constraint{{Value: "a", Type: Constraint_DEPRECATED_POSITIVE}}}, + }, + }, + "constraints must either be required .+ or prohibited .+", + }, + { + ZoneConfig{ + NumReplicas: 1, + RangeMaxBytes: DefaultZoneConfig().RangeMaxBytes, + Constraints: []Constraints{ + { + Constraints: []Constraint{{Value: "a", Type: Constraint_REQUIRED}}, + NumReplicas: 2, + }, + }, + }, + "the number of replicas specified in constraints .+ does not equal", + }, + { + ZoneConfig{ + NumReplicas: 3, + RangeMaxBytes: 
DefaultZoneConfig().RangeMaxBytes, + Constraints: []Constraints{ + { + Constraints: []Constraint{{Value: "a", Type: Constraint_REQUIRED}}, + NumReplicas: 2, + }, + }, + }, + "the number of replicas specified in constraints .+ does not equal", + }, + { + ZoneConfig{ + NumReplicas: 1, + RangeMaxBytes: DefaultZoneConfig().RangeMaxBytes, + Constraints: []Constraints{ + { + Constraints: []Constraint{{Value: "a", Type: Constraint_REQUIRED}}, + NumReplicas: 0, + }, + { + Constraints: []Constraint{{Value: "b", Type: Constraint_REQUIRED}}, + NumReplicas: 1, + }, + }, + }, + "constraints must apply to at least one replica", + }, } + for i, c := range testCases { err := c.cfg.Validate() if !testutils.IsError(err, c.expected) { - t.Fatalf("%d: expected %q, got %v", i, c.expected, err) + t.Errorf("%d: expected %q, got %v", i, c.expected, err) } } } @@ -75,10 +131,10 @@ func TestZoneConfigValidate(t *testing.T) { func TestZoneConfigSubzones(t *testing.T) { defer leaktest.AfterTest(t)() - zone := config.DefaultZoneConfig() - subzoneAInvalid := config.Subzone{IndexID: 1, PartitionName: "a", Config: config.ZoneConfig{}} - subzoneA := config.Subzone{IndexID: 1, PartitionName: "a", Config: config.DefaultZoneConfig()} - subzoneB := config.Subzone{IndexID: 1, PartitionName: "b", Config: config.DefaultZoneConfig()} + zone := DefaultZoneConfig() + subzoneAInvalid := Subzone{IndexID: 1, PartitionName: "a", Config: ZoneConfig{}} + subzoneA := Subzone{IndexID: 1, PartitionName: "a", Config: DefaultZoneConfig()} + subzoneB := Subzone{IndexID: 1, PartitionName: "b", Config: DefaultZoneConfig()} if zone.IsSubzonePlaceholder() { t.Errorf("default zone config should not be considered a subzone placeholder") @@ -111,8 +167,8 @@ func TestZoneConfigSubzones(t *testing.T) { } zone.DeleteTableConfig() - if e := (config.ZoneConfig{ - Subzones: []config.Subzone{subzoneA, subzoneB}, + if e := (ZoneConfig{ + Subzones: []Subzone{subzoneA, subzoneB}, }); !e.Equal(zone) { t.Errorf("expected zone after clearing to equal %+v, but got %+v", e, zone) } @@ -133,10 +189,10 @@ func TestZoneConfigSubzones(t *testing.T) { t.Errorf("expected non-deleted subzone to equal %+v, but got %+v", &subzoneA, subzone) } - zone.SetSubzone(config.Subzone{IndexID: 2, Config: config.DefaultZoneConfig()}) - zone.SetSubzone(config.Subzone{IndexID: 2, PartitionName: "a", Config: config.DefaultZoneConfig()}) + zone.SetSubzone(Subzone{IndexID: 2, Config: DefaultZoneConfig()}) + zone.SetSubzone(Subzone{IndexID: 2, PartitionName: "a", Config: DefaultZoneConfig()}) zone.SetSubzone(subzoneB) // interleave a subzone from a different index - zone.SetSubzone(config.Subzone{IndexID: 2, PartitionName: "b", Config: config.DefaultZoneConfig()}) + zone.SetSubzone(Subzone{IndexID: 2, PartitionName: "b", Config: DefaultZoneConfig()}) if e, a := 5, len(zone.Subzones); e != a { t.Fatalf("expected %d subzones, but found %d", e, a) } @@ -163,55 +219,233 @@ func TestZoneConfigSubzones(t *testing.T) { func TestZoneConfigMarshalYAML(t *testing.T) { defer leaktest.AfterTest(t)() - original := config.ZoneConfig{ + original := ZoneConfig{ RangeMinBytes: 1, RangeMaxBytes: 1, - GC: config.GCPolicy{ + GC: GCPolicy{ TTLSeconds: 1, }, NumReplicas: 1, - Constraints: config.Constraints{ - Constraints: []config.Constraint{ + } + + testCases := []struct { + constraints []Constraints + expected string + }{ + { + constraints: nil, + expected: `range_min_bytes: 1 +range_max_bytes: 1 +gc: + ttlseconds: 1 +num_replicas: 1 +constraints: [] +`, + }, + { + constraints: []Constraints{ + { + 
Constraints: []Constraint{ + { + Type: Constraint_REQUIRED, + Key: "duck", + Value: "foo", + }, + }, + }, + }, + expected: `range_min_bytes: 1 +range_max_bytes: 1 +gc: + ttlseconds: 1 +num_replicas: 1 +constraints: [+duck=foo] +`, + }, + { + constraints: []Constraints{ { - Type: config.Constraint_POSITIVE, - Value: "foo", + Constraints: []Constraint{ + { + Type: Constraint_DEPRECATED_POSITIVE, + Value: "foo", + }, + { + Type: Constraint_REQUIRED, + Key: "duck", + Value: "foo", + }, + { + Type: Constraint_PROHIBITED, + Key: "duck", + Value: "foo", + }, + }, }, + }, + expected: `range_min_bytes: 1 +range_max_bytes: 1 +gc: + ttlseconds: 1 +num_replicas: 1 +constraints: [foo, +duck=foo, -duck=foo] +`, + }, + { + constraints: []Constraints{ { - Type: config.Constraint_REQUIRED, - Key: "duck", - Value: "foo", + NumReplicas: 3, + Constraints: []Constraint{ + { + Type: Constraint_REQUIRED, + Key: "duck", + Value: "foo", + }, + }, }, + }, + expected: `range_min_bytes: 1 +range_max_bytes: 1 +gc: + ttlseconds: 1 +num_replicas: 1 +constraints: {+duck=foo: 3} +`, + }, + { + constraints: []Constraints{ { - Type: config.Constraint_PROHIBITED, - Key: "duck", - Value: "foo", + NumReplicas: 3, + Constraints: []Constraint{ + { + Type: Constraint_DEPRECATED_POSITIVE, + Value: "foo", + }, + { + Type: Constraint_REQUIRED, + Key: "duck", + Value: "foo", + }, + { + Type: Constraint_PROHIBITED, + Key: "duck", + Value: "foo", + }, + }, }, }, + expected: `range_min_bytes: 1 +range_max_bytes: 1 +gc: + ttlseconds: 1 +num_replicas: 1 +constraints: {'foo,+duck=foo,-duck=foo': 3} +`, }, - } - - expected := `range_min_bytes: 1 + { + constraints: []Constraints{ + { + NumReplicas: 1, + Constraints: []Constraint{ + { + Type: Constraint_REQUIRED, + Key: "duck", + Value: "bar1", + }, + { + Type: Constraint_REQUIRED, + Key: "duck", + Value: "bar2", + }, + }, + }, + { + NumReplicas: 2, + Constraints: []Constraint{ + { + Type: Constraint_REQUIRED, + Key: "duck", + Value: "foo", + }, + }, + }, + }, + expected: `range_min_bytes: 1 range_max_bytes: 1 gc: ttlseconds: 1 num_replicas: 1 -constraints: [foo, +duck=foo, -duck=foo] -` +constraints: {'+duck=bar1,+duck=bar2': 1, +duck=foo: 2} +`, + }, + } + + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + original.Constraints = tc.constraints + body, err := yaml.Marshal(original) + if err != nil { + t.Fatal(err) + } + if string(body) != tc.expected { + t.Fatalf("yaml.Marshal(%+v)\ngot:\n%s\nwant:\n%s", original, body, tc.expected) + } - body, err := yaml.Marshal(original) - if err != nil { - t.Fatal(err) + var unmarshaled ZoneConfig + if err := yaml.UnmarshalStrict(body, &unmarshaled); err != nil { + t.Fatal(err) + } + if !proto.Equal(&unmarshaled, &original) { + t.Errorf("yaml.UnmarshalStrict(%q)\ngot:\n%+v\nwant:\n%+v", body, unmarshaled, original) + } + }) } - if string(body) != expected { - t.Fatalf("yaml.Marshal(%+v) = %s; not %s", original, body, expected) +} + +func TestConstraintsListYAML(t *testing.T) { + defer leaktest.AfterTest(t)() + + testCases := []struct { + input string + expectErr bool + }{ + {}, + {input: "[]"}, + {input: "[+a]"}, + {input: "[+a, -b=2, +c=d, e]"}, + {input: "{+a: 1}"}, + {input: "{+a: 1, '+a=1,+b,+c=d': 2}"}, + {input: "{'+a: 1'}"}, // unfortunately this parses just fine because yaml autoconverts it to a list... 
+ {input: "{+a,+b: 1}"}, // this also parses fine but will fail ZoneConfig.Validate() + {input: "{+a: 1, '+a=1,+b,+c=d': b}", expectErr: true}, + {input: "[+a: 1]", expectErr: true}, + {input: "[+a: 1, '+a=1,+b,+c=d': 2]", expectErr: true}, } - var unmarshaled config.ZoneConfig - if err := yaml.UnmarshalStrict(body, &unmarshaled); err != nil { - t.Fatal(err) + for _, tc := range testCases { + t.Run(tc.input, func(t *testing.T) { + var constraints ConstraintsList + err := yaml.UnmarshalStrict([]byte(tc.input), &constraints) + if err == nil && tc.expectErr { + t.Errorf("expected error, but got constraints %+v", constraints) + } + if err != nil && !tc.expectErr { + t.Errorf("expected success, but got %v", err) + } + }) } - if !proto.Equal(&unmarshaled, &original) { - t.Errorf("yaml.UnmarshalStrict(%q) = %+v; not %+v", body, unmarshaled, original) +} + +func TestMarshalableZoneConfigRoundTrip(t *testing.T) { + defer leaktest.AfterTest(t)() + + original := NewPopulatedZoneConfig( + rand.New(rand.NewSource(timeutil.Now().UnixNano())), false /* easy */) + marshalable := zoneConfigToMarshalable(*original) + roundTripped := zoneConfigFromMarshalable(marshalable) + + if !reflect.DeepEqual(roundTripped, *original) { + t.Errorf("round-tripping a ZoneConfig through a marshalableZoneConfig failed:\noriginal:\n%+v\nmarshable:\n%+v\ngot:\n%+v", original, marshalable, roundTripped) } } @@ -219,16 +453,16 @@ func TestZoneSpecifiers(t *testing.T) { defer leaktest.AfterTest(t)() // Simulate exactly two named zones: one named default and one named carl. - // N.B. config.DefaultZoneName must always exist in the mapping; it is treated + // N.B. DefaultZoneName must always exist in the mapping; it is treated // specially so that it always appears first in the lookup path. 
- defer func(old map[string]uint32) { config.NamedZones = old }(config.NamedZones) - config.NamedZones = map[string]uint32{ - config.DefaultZoneName: 0, - "carl": 42, - } - defer func(old map[uint32]string) { config.NamedZonesByID = old }(config.NamedZonesByID) - config.NamedZonesByID = map[uint32]string{ - 0: config.DefaultZoneName, + defer func(old map[string]uint32) { NamedZones = old }(NamedZones) + NamedZones = map[string]uint32{ + DefaultZoneName: 0, + "carl": 42, + } + defer func(old map[uint32]string) { NamedZonesByID = old }(NamedZonesByID) + NamedZonesByID = map[uint32]string{ + 0: DefaultZoneName, 42: "carl", } @@ -299,18 +533,18 @@ func TestZoneSpecifiers(t *testing.T) { } { t.Run(fmt.Sprintf("parse-cli=%s", tc.cliSpecifier), func(t *testing.T) { err := func() error { - zs, err := config.ParseCLIZoneSpecifier(tc.cliSpecifier) + zs, err := ParseCLIZoneSpecifier(tc.cliSpecifier) if err != nil { return err } - id, err := config.ResolveZoneSpecifier(&zs, resolveName) + id, err := ResolveZoneSpecifier(&zs, resolveName) if err != nil { return err } if e, a := tc.id, int(id); a != e { t.Errorf("path %d did not match expected path %d", a, e) } - if e, a := tc.cliSpecifier, config.CLIZoneSpecifier(&zs); e != a { + if e, a := tc.cliSpecifier, CLIZoneSpecifier(&zs); e != a { t.Errorf("expected %q to roundtrip, but got %q", e, a) } return nil @@ -340,14 +574,14 @@ func TestZoneSpecifiers(t *testing.T) { {58, "", "58 not found"}, } { t.Run(fmt.Sprintf("resolve-id=%d", tc.id), func(t *testing.T) { - zs, err := config.ZoneSpecifierFromID(tc.id, resolveID) + zs, err := ZoneSpecifierFromID(tc.id, resolveID) if !testutils.IsError(err, tc.err) { t.Errorf("unable to lookup ID %d: %s", tc.id, err) } if tc.err != "" { return } - if e, a := tc.cliSpecifier, config.CLIZoneSpecifier(&zs); e != a { + if e, a := tc.cliSpecifier, CLIZoneSpecifier(&zs); e != a { t.Errorf("expected %q specifier for ID %d, but got %q", e, tc.id, a) } }) diff --git a/pkg/config/zone_yaml.go b/pkg/config/zone_yaml.go new file mode 100644 index 000000000000..31bef405fb54 --- /dev/null +++ b/pkg/config/zone_yaml.go @@ -0,0 +1,217 @@ +// Copyright 2018 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package config + +import ( + fmt "fmt" + "runtime/debug" + "sort" + "strings" + + yaml "gopkg.in/yaml.v2" +) + +var _ yaml.Marshaler = Constraints{} +var _ yaml.Unmarshaler = &Constraints{} + +// MarshalYAML implements yaml.Marshaler. +func (c Constraints) MarshalYAML() (interface{}, error) { + return nil, fmt.Errorf( + "MarshalYAML should never be called directly on Constraints (%v): %v", c, debug.Stack()) +} + +// UnmarshalYAML implements yaml.Marshaler. +func (c *Constraints) UnmarshalYAML(unmarshal func(interface{}) error) error { + return fmt.Errorf( + "UnmarshalYAML should never be called directly on Constraints: %v", debug.Stack()) +} + +// ConstraintsList is an alias for a slice of Constraints that can be +// properly marshaled to/from YAML. 
+type ConstraintsList []Constraints + +var _ yaml.Marshaler = ConstraintsList{} +var _ yaml.Unmarshaler = &ConstraintsList{} + +// MarshalYAML implements yaml.Marshaler. +// +// We use two different formats here, dependent on whether per-replica +// constraints are being used in ConstraintsList: +// 1. A legacy format when there are 0 or 1 Constraints and NumReplicas is +// zero: +// [c1, c2, c3] +// 2. A per-replica format when NumReplicas is non-zero: +// {"c1,c2,c3": numReplicas1, "c4,c5": numReplicas2} +func (c ConstraintsList) MarshalYAML() (interface{}, error) { + // If per-replica Constraints aren't in use, marshal everything into a list + // for compatibility with pre-2.0-style configs. + if len(c) == 0 { + return []string{}, nil + } + if len(c) == 1 && c[0].NumReplicas == 0 { + short := make([]string, len(c[0].Constraints)) + for i, constraint := range c[0].Constraints { + short[i] = constraint.String() + } + return short, nil + } + + // Otherwise, convert into a map from Constraints to NumReplicas. + constraintsMap := make(map[string]int32) + for _, constraints := range c { + short := make([]string, len(constraints.Constraints)) + for i, constraint := range constraints.Constraints { + short[i] = constraint.String() + } + constraintsMap[strings.Join(short, ",")] = constraints.NumReplicas + } + return constraintsMap, nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (c *ConstraintsList) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Note that we're intentionally checking for err == nil here. This handles + // unmarshaling the legacy Constraints format, which is just a list of + // strings. + var strs []string + if err := unmarshal(&strs); err == nil { + constraints := make([]Constraint, len(strs)) + for i, short := range strs { + if err := constraints[i].FromString(short); err != nil { + return err + } + } + if len(constraints) == 0 { + *c = []Constraints{} + } else { + *c = []Constraints{ + { + Constraints: constraints, + NumReplicas: 0, + }, + } + } + return nil + } + + // Otherwise, the input must be a map that can be converted to per-replica + // constraints. + constraintsMap := make(map[string]int32) + if err := unmarshal(&constraintsMap); err != nil { + return err + } + + constraintsList := make([]Constraints, 0, len(constraintsMap)) + for constraintsStr, numReplicas := range constraintsMap { + shortConstraints := strings.Split(constraintsStr, ",") + constraints := make([]Constraint, len(shortConstraints)) + for i, short := range shortConstraints { + if err := constraints[i].FromString(short); err != nil { + return err + } + } + constraintsList = append(constraintsList, Constraints{ + Constraints: constraints, + NumReplicas: numReplicas, + }) + } + + // Sort the resulting list for reproducible orderings in tests. + sort.Slice(constraintsList, func(i, j int) bool { + // First, try to find which Constraints sort first alphabetically in string + // format, considering the shorter list lesser if they're otherwise equal. + for k := range constraintsList[i].Constraints { + if k >= len(constraintsList[j].Constraints) { + return false + } + lStr := constraintsList[i].Constraints[k].String() + rStr := constraintsList[j].Constraints[k].String() + if lStr < rStr { + return true + } + if lStr > rStr { + return false + } + } + if len(constraintsList[i].Constraints) < len(constraintsList[j].Constraints) { + return true + } + // If they're completely equal and the same length, go by NumReplicas. 
+ return constraintsList[i].NumReplicas < constraintsList[j].NumReplicas + }) + + *c = constraintsList + return nil +} + +// marshalableZoneConfig should be kept up-to-date with the real, +// auto-generated ZoneConfig type, but with []Constraints changed to +// ConstraintsList for backwards-compatible yaml marshaling and unmarshaling. +type marshalableZoneConfig struct { + RangeMinBytes int64 `protobuf:"varint,2,opt,name=range_min_bytes,json=rangeMinBytes" json:"range_min_bytes" yaml:"range_min_bytes"` + RangeMaxBytes int64 `protobuf:"varint,3,opt,name=range_max_bytes,json=rangeMaxBytes" json:"range_max_bytes" yaml:"range_max_bytes"` + GC GCPolicy `protobuf:"bytes,4,opt,name=gc" json:"gc"` + NumReplicas int32 `protobuf:"varint,5,opt,name=num_replicas,json=numReplicas" json:"num_replicas" yaml:"num_replicas"` + Constraints ConstraintsList `protobuf:"bytes,6,rep,name=constraints" json:"constraints" yaml:"constraints,flow"` + Subzones []Subzone `protobuf:"bytes,8,rep,name=subzones" json:"subzones" yaml:"-"` + SubzoneSpans []SubzoneSpan `protobuf:"bytes,7,rep,name=subzone_spans,json=subzoneSpans" json:"subzone_spans" yaml:"-"` +} + +func zoneConfigToMarshalable(c ZoneConfig) marshalableZoneConfig { + var m marshalableZoneConfig + m.RangeMinBytes = c.RangeMinBytes + m.RangeMaxBytes = c.RangeMaxBytes + m.GC = c.GC + if c.NumReplicas != 0 { + m.NumReplicas = c.NumReplicas + } + m.Constraints = ConstraintsList(c.Constraints) + m.Subzones = c.Subzones + m.SubzoneSpans = c.SubzoneSpans + return m +} + +func zoneConfigFromMarshalable(m marshalableZoneConfig) ZoneConfig { + var c ZoneConfig + c.RangeMinBytes = m.RangeMinBytes + c.RangeMaxBytes = m.RangeMaxBytes + c.GC = m.GC + c.NumReplicas = m.NumReplicas + c.Constraints = []Constraints(m.Constraints) + c.Subzones = m.Subzones + c.SubzoneSpans = m.SubzoneSpans + return c +} + +var _ yaml.Marshaler = ZoneConfig{} +var _ yaml.Unmarshaler = &ZoneConfig{} + +// MarshalYAML implements yaml.Marshaler. +func (c ZoneConfig) MarshalYAML() (interface{}, error) { + return zoneConfigToMarshalable(c), nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (c *ZoneConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Pre-initialize aux with the contents of c. This is important for + // maintaining the behavior of not overwriting existing fields unless the + // user provided new values for them. + aux := zoneConfigToMarshalable(*c) + if err := unmarshal(&aux); err != nil { + return err + } + *c = zoneConfigFromMarshalable(aux) + return nil +} diff --git a/pkg/internal/client/client_test.go b/pkg/internal/client/client_test.go index 24aa0bf83e10..2079b3dbbc07 100644 --- a/pkg/internal/client/client_test.go +++ b/pkg/internal/client/client_test.go @@ -412,7 +412,7 @@ func TestClientGetAndPutProto(t *testing.T) { zoneConfig := config.ZoneConfig{ NumReplicas: 2, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "mem"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "mem"}}}}, RangeMinBytes: 1 << 10, // 1k RangeMaxBytes: 1 << 18, // 256k } diff --git a/pkg/roachpb/metadata.go b/pkg/roachpb/metadata.go index d5749f47eb23..36b320bcb39e 100644 --- a/pkg/roachpb/metadata.go +++ b/pkg/roachpb/metadata.go @@ -59,6 +59,29 @@ func (r ReplicaID) String() string { return strconv.FormatInt(int64(r), 10) } +// Equals returns whether the Attributes lists are equivalent. Attributes lists +// are treated as sets, meaaning that ordering and duplicates are ignored. 
+func (a Attributes) Equals(b Attributes) bool { + // This is O(n^2), but Attribute lists should never be long enough for that + // to matter, and allocating memory every time this is called would be worse. + if len(a.Attrs) != len(b.Attrs) { + return false + } + for _, aAttr := range a.Attrs { + var found bool + for _, bAttr := range b.Attrs { + if aAttr == bAttr { + found = true + break + } + } + if !found { + return false + } + } + return true +} + // String implements the fmt.Stringer interface. func (a Attributes) String() string { return strings.Join(a.Attrs, ",") @@ -289,6 +312,22 @@ func (Locality) Type() string { return "Locality" } +// Equals returns whether the two Localities are equivalent. +// +// Because Locality Tiers are hierarchically ordered, if two Localities contain +// the same Tiers in different orders, they are not considered equal. +func (l Locality) Equals(r Locality) bool { + if len(l.Tiers) != len(r.Tiers) { + return false + } + for i := range l.Tiers { + if l.Tiers[i] != r.Tiers[i] { + return false + } + } + return true +} + // MaxDiversityScore is the largest possible diversity score, indicating that // two localities are as different from each other as possible. const MaxDiversityScore = 1.0 diff --git a/pkg/server/updates_test.go b/pkg/server/updates_test.go index 4bd8acef0a60..c71673fcb000 100644 --- a/pkg/server/updates_test.go +++ b/pkg/server/updates_test.go @@ -328,7 +328,7 @@ func TestReportUsage(t *testing.T) { "diagnostics.reporting.send_crash_reports": "false", "server.time_until_store_dead": "20s", "trace.debug.enable": "false", - "version": "1.1-13", + "version": "1.1-14", "cluster.secret": "", } { if got, ok := r.last.AlteredSettings[key]; !ok { diff --git a/pkg/settings/cluster/cockroach_versions.go b/pkg/settings/cluster/cockroach_versions.go index 2d78b5d7c58b..3527f29a846a 100644 --- a/pkg/settings/cluster/cockroach_versions.go +++ b/pkg/settings/cluster/cockroach_versions.go @@ -45,6 +45,7 @@ const ( VersionNoRaftProposalKeys VersionTxnSpanRefresh VersionReadUncommittedRangeLookups + VersionPerReplicaZoneConstraints // Add new versions here (step one of two). @@ -170,6 +171,11 @@ var versionsSingleton = keyedVersions([]keyedVersion{ Key: VersionReadUncommittedRangeLookups, Version: roachpb.Version{Major: 1, Minor: 1, Unstable: 13}, }, + { + // VersionReadUncommittedRangeLookups is https://github.com/cockroachdb/cockroach/pull/21276. + Key: VersionPerReplicaZoneConstraints, + Version: roachpb.Version{Major: 1, Minor: 1, Unstable: 14}, + }, // Add new versions here (step two of two). 
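
[Reviewer note, not part of the diff.] To make the new behavior concrete, the following is a minimal, self-contained sketch of how the two YAML formats accepted by the new config.ConstraintsList type (introduced in pkg/config/zone_yaml.go above and exercised by TestConstraintsListYAML) are parsed. The constraint values used here ("+region=us-east1", "+region=us-west1", "-ssd") are hypothetical placeholders, not values from this change.

// Illustrative sketch only: parsing the legacy list format and the new
// per-replica map format with the ConstraintsList type added in this diff.
package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/config"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Legacy list format: yields a single Constraints entry with
	// NumReplicas == 0, meaning the constraints apply to every replica.
	var legacy config.ConstraintsList
	if err := yaml.UnmarshalStrict([]byte("[+region=us-east1, -ssd]"), &legacy); err != nil {
		panic(err)
	}
	fmt.Printf("legacy: %+v\n", legacy)

	// Per-replica map format: each key is a comma-separated constraint list
	// and each value is the number of replicas it applies to. Per the new
	// ZoneConfig.Validate rules, only required ('+') constraints may be
	// scoped to a subset of replicas, and the per-group counts must sum to
	// the zone's num_replicas.
	var perReplica config.ConstraintsList
	if err := yaml.UnmarshalStrict([]byte("{'+region=us-east1': 2, '+region=us-west1': 1}"), &perReplica); err != nil {
		panic(err)
	}
	fmt.Printf("per-replica: %+v\n", perReplica)
}

This mirrors the version-gated behavior checked in pkg/sql/set_zone_config.go below: a config that unmarshals into more than one Constraints group, or into a group with a non-zero NumReplicas, requires VersionPerReplicaZoneConstraints.
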
diff --git a/pkg/sql/logictest/testdata/logic_test/crdb_internal b/pkg/sql/logictest/testdata/logic_test/crdb_internal index 6d1872ca59bb..a5f5119ab709 100644 --- a/pkg/sql/logictest/testdata/logic_test/crdb_internal +++ b/pkg/sql/logictest/testdata/logic_test/crdb_internal @@ -209,7 +209,7 @@ select crdb_internal.set_vmodule('') query T select crdb_internal.node_executable_version() ---- -1.1-13 +1.1-14 query ITTT colnames select node_id, component, field, regexp_replace(regexp_replace(value, '^\d+$', ''), e':\\d+', ':') as value from crdb_internal.node_runtime_info @@ -291,4 +291,4 @@ select * from crdb_internal.kv_store_status query T select crdb_internal.node_executable_version() ---- -1.1-13 +1.1-14 diff --git a/pkg/sql/set_zone_config.go b/pkg/sql/set_zone_config.go index 7889c258840d..8a00a3719130 100644 --- a/pkg/sql/set_zone_config.go +++ b/pkg/sql/set_zone_config.go @@ -186,6 +186,13 @@ func writeZoneConfig( return 0, err } } + if len(zone.Constraints) > 1 || (len(zone.Constraints) == 1 && zone.Constraints[0].NumReplicas != 0) { + st := execCfg.Settings + if !st.Version.IsMinSupported(cluster.VersionPerReplicaZoneConstraints) { + return 0, errors.New( + "cluster version does not support zone configs with per-replica constraints") + } + } internalExecutor := InternalExecutor{ExecCfg: execCfg} diff --git a/pkg/sql/zone_config_test.go b/pkg/sql/zone_config_test.go index cb33bbe5acb4..1c1c375d5f5d 100644 --- a/pkg/sql/zone_config_test.go +++ b/pkg/sql/zone_config_test.go @@ -230,23 +230,23 @@ func TestGetZoneConfig(t *testing.T) { // p1: true [1, 255) db1Cfg := config.ZoneConfig{ NumReplicas: 1, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db1"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "db1"}}}}, } tb11Cfg := config.ZoneConfig{ NumReplicas: 1, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db1.tb1"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "db1.tb1"}}}}, } p211Cfg := config.ZoneConfig{ NumReplicas: 1, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db2.tb1.p1"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "db2.tb1.p1"}}}}, } p212Cfg := config.ZoneConfig{ NumReplicas: 1, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db2.tb1.p2"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "db2.tb1.p2"}}}}, } tb21Cfg := config.ZoneConfig{ NumReplicas: 1, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db2.tb1"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "db2.tb1"}}}}, Subzones: []config.Subzone{ {PartitionName: "p0", Config: p211Cfg}, {PartitionName: "p1", Config: p212Cfg}, @@ -259,7 +259,7 @@ func TestGetZoneConfig(t *testing.T) { } p221Cfg := config.ZoneConfig{ NumReplicas: 1, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "db2.tb2.p1"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "db2.tb2.p1"}}}}, } tb22Cfg := config.ZoneConfig{ NumReplicas: 0, diff --git a/pkg/storage/allocator.go b/pkg/storage/allocator.go index 42b8922250eb..ace181986698 100644 --- a/pkg/storage/allocator.go +++ b/pkg/storage/allocator.go @@ -130,7 +130,7 @@ const ( // can be retried quickly as soon as new stores come online, or additional // space frees up. 
type allocatorError struct { - required []config.Constraint + constraints []config.Constraints aliveStoreCount int } @@ -138,12 +138,12 @@ func (ae *allocatorError) Error() string { var auxInfo string // Whenever the likely problem is not having enough nodes up, make the // message really clear. - if len(ae.required) == 0 { + if len(ae.constraints) == 0 { auxInfo = "; likely not enough nodes in cluster" } - return fmt.Sprintf("0 of %d store%s with attributes matching %s%s", + return fmt.Sprintf("0 of %d store%s with attributes matching %v%s", ae.aliveStoreCount, util.Pluralize(int64(ae.aliveStoreCount)), - ae.required, auxInfo) + ae.constraints, auxInfo) } func (*allocatorError) purgatoryErrorMarker() {} @@ -151,7 +151,7 @@ func (*allocatorError) purgatoryErrorMarker() {} var _ purgatoryError = &allocatorError{} // allocatorRand pairs a rand.Rand with a mutex. -// TODO: Allocator is typically only accessed from a single thread (the +// NOTE: Allocator is typically only accessed from a single thread (the // replication queue), but this assumption is broken in tests which force // replication scans. If those tests can be modified to suspend the normal // replication queue during the forced scan, then this rand could be used @@ -330,16 +330,18 @@ type decisionDetails struct { // a store. func (a *Allocator) AllocateTarget( ctx context.Context, - constraints config.Constraints, + constraints []config.Constraints, existing []roachpb.ReplicaDescriptor, rangeInfo RangeInfo, disableStatsBasedRebalancing bool, ) (*roachpb.StoreDescriptor, string, error) { sl, aliveStoreCount, throttledStoreCount := a.storePool.getStoreList(rangeInfo.Desc.RangeID, storeFilterThrottled) + analyzedConstraints := analyzeConstraints( + ctx, a.storePool.getStoreDescriptor, rangeInfo.Desc.Replicas, constraints) options := a.scorerOptions(disableStatsBasedRebalancing) candidates := allocateCandidates( - sl, constraints, existing, rangeInfo, a.storePool.getLocalities(existing), options, + sl, analyzedConstraints, existing, rangeInfo, a.storePool.getLocalities(existing), options, ) log.VEventf(ctx, 3, "allocate candidates: %s", candidates) if target := candidates.selectGood(a.randGen); target != nil { @@ -362,7 +364,7 @@ func (a *Allocator) AllocateTarget( return nil, "", errors.Errorf("%d matching stores are currently throttled", throttledStoreCount) } return nil, "", &allocatorError{ - required: constraints.Constraints, + constraints: constraints, aliveStoreCount: aliveStoreCount, } } @@ -370,7 +372,7 @@ func (a *Allocator) AllocateTarget( func (a Allocator) simulateRemoveTarget( ctx context.Context, targetStore roachpb.StoreID, - constraints config.Constraints, + constraints []config.Constraints, candidates []roachpb.ReplicaDescriptor, rangeInfo RangeInfo, disableStatsBasedRebalancing bool, @@ -394,7 +396,7 @@ func (a Allocator) simulateRemoveTarget( // replicas. 
func (a Allocator) RemoveTarget( ctx context.Context, - constraints config.Constraints, + constraints []config.Constraints, candidates []roachpb.ReplicaDescriptor, rangeInfo RangeInfo, disableStatsBasedRebalancing bool, @@ -410,10 +412,12 @@ func (a Allocator) RemoveTarget( } sl, _, _ := a.storePool.getStoreListFromIDs(existingStoreIDs, roachpb.RangeID(0), storeFilterNone) + analyzedConstraints := analyzeConstraints( + ctx, a.storePool.getStoreDescriptor, rangeInfo.Desc.Replicas, constraints) options := a.scorerOptions(disableStatsBasedRebalancing) rankedCandidates := removeCandidates( sl, - constraints, + analyzedConstraints, rangeInfo, a.storePool.getLocalities(rangeInfo.Desc.Replicas), options, @@ -460,7 +464,7 @@ func (a Allocator) RemoveTarget( // under-utilized store. func (a Allocator) RebalanceTarget( ctx context.Context, - constraints config.Constraints, + constraints []config.Constraints, raftStatus *raft.Status, rangeInfo RangeInfo, filter storeFilter, @@ -496,43 +500,48 @@ func (a Allocator) RebalanceTarget( } } + analyzedConstraints := analyzeConstraints( + ctx, a.storePool.getStoreDescriptor, rangeInfo.Desc.Replicas, constraints) options := a.scorerOptions(disableStatsBasedRebalancing) - existingCandidates, candidates := rebalanceCandidates( + results := rebalanceCandidates( ctx, sl, - constraints, - rangeInfo.Desc.Replicas, + analyzedConstraints, rangeInfo, a.storePool.getLocalities(rangeInfo.Desc.Replicas), + a.storePool.getNodeLocalityString, options, ) - if len(existingCandidates) == 0 || len(candidates) == 0 { + if len(results) == 0 { return nil, "" } - // Find all candidates that are better than the worst existing replica. - targets := candidates.betterThan(existingCandidates[len(existingCandidates)-1]) - target := targets.selectGood(a.randGen) - log.VEventf(ctx, 3, "rebalance candidates: %s\nexisting replicas: %s\ntarget: %s", - candidates, existingCandidates, target) - if target == nil { - return nil, "" - } + // Deep-copy the Replicas slice since we'll mutate it in the loop below. + desc := *rangeInfo.Desc + rangeInfo.Desc = &desc + + // Keep looping until we either run out of options or find a target that we're + // pretty sure we won't want to remove immediately after adding it. + // If we would, we don't want to actually rebalance to that target. + var target *candidate + var existingCandidates candidateList + for { + target, existingCandidates = bestRebalanceTarget(a.randGen, results) + if target == nil { + return nil, "" + } - // Determine whether we'll just remove the target immediately after adding it. - // If we would, we don't want to actually do the rebalance. - for len(candidates) > 0 { + // Add a fake new replica to our copy of the range descriptor so that we can + // simulate the removal logic. If we decide not to go with this target, note + // that this needs to be removed from desc before we try any other target. newReplica := roachpb.ReplicaDescriptor{ NodeID: target.store.Node.NodeID, StoreID: target.store.StoreID, ReplicaID: rangeInfo.Desc.NextReplicaID, } - - // Deep-copy the Replicas slice since we'll mutate it. - desc := *rangeInfo.Desc + desc := rangeInfo.Desc desc.Replicas = append(desc.Replicas[:len(desc.Replicas):len(desc.Replicas)], newReplica) - rangeInfo.Desc = &desc // If we can, filter replicas as we would if we were actually removing one. // If we can't (e.g. 
because we're the leaseholder but not the raft leader), @@ -555,18 +564,17 @@ func (a Allocator) RebalanceTarget( log.Warningf(ctx, "simulating RemoveTarget failed: %s", err) return nil, "" } - if shouldRebalanceBetween(ctx, *target, removeReplica, existingCandidates, removeDetails, options) { + if target.store.StoreID != removeReplica.StoreID { break } - // Remove the considered target from our modified RangeDescriptor and from - // the candidates list, then try again if there are any other candidates. - rangeInfo.Desc.Replicas = rangeInfo.Desc.Replicas[:len(rangeInfo.Desc.Replicas)-1] - candidates = candidates.removeCandidate(*target) - target = candidates.selectGood(a.randGen) - if target == nil { - return nil, "" - } + + log.VEventf(ctx, 2, "not rebalancing to s%d because we'd immediately remove it: %s", + target.store.StoreID, removeDetails) + desc.Replicas = desc.Replicas[:len(desc.Replicas)-1] } + + // Compile the details entry that will be persisted into system.rangelog for + // debugging/auditability purposes. details := decisionDetails{ Target: target.compactString(options), Existing: existingCandidates.compactString(options), @@ -579,45 +587,8 @@ func (a Allocator) RebalanceTarget( if err != nil { log.Warningf(ctx, "failed to marshal details for choosing rebalance target: %s", err) } - return &target.store, string(detailsBytes) -} - -// shouldRebalanceBetween returns whether it's a good idea to rebalance to the -// given `add` candidate if the replica that will be removed after adding it is -// `remove`. This is a last failsafe to ensure that we don't take unnecessary -// rebalance actions that cause thrashing. -func shouldRebalanceBetween( - ctx context.Context, - add candidate, - remove roachpb.ReplicaDescriptor, - existingCandidates candidateList, - removeDetails string, - options scorerOptions, -) bool { - if remove.StoreID == add.store.StoreID { - log.VEventf(ctx, 2, "not rebalancing to s%d because we'd immediately remove it: %s", - add.store.StoreID, removeDetails) - return false - } - // It's possible that we initially decided to rebalance based on comparing - // rebalance candidates in one locality to an existing replica in another - // locality (e.g. if one locality has many more nodes than another). This can - // make for unnecessary rebalances and even thrashing, so do a more direct - // comparison here of the replicas we'll actually be adding and removing. - for _, removeCandidate := range existingCandidates { - if removeCandidate.store.StoreID == remove.StoreID { - if removeCandidate.worthRebalancingTo(add, options) { - return true - } - log.VEventf(ctx, 2, "not rebalancing to %s because it isn't an improvement over "+ - "what we'd remove after adding it: %s", add, removeCandidate) - return false - } - } - // If the code reaches this point, remove must be a non-live store, so let the - // rebalance happen. - return true + return &target.store, string(detailsBytes) } func (a *Allocator) scorerOptions(disableStatsBasedRebalancing bool) scorerOptions { @@ -634,7 +605,7 @@ func (a *Allocator) scorerOptions(disableStatsBasedRebalancing bool) scorerOptio // unless asked to do otherwise by the checkTransferLeaseSource parameter. func (a *Allocator) TransferLeaseTarget( ctx context.Context, - constraints config.Constraints, + constraints []config.Constraints, existing []roachpb.ReplicaDescriptor, leaseStoreID roachpb.StoreID, rangeID roachpb.RangeID, @@ -729,7 +700,7 @@ func (a *Allocator) TransferLeaseTarget( // attributes. 
func (a *Allocator) ShouldTransferLease( ctx context.Context, - constraints config.Constraints, + constraints []config.Constraints, existing []roachpb.ReplicaDescriptor, leaseStoreID roachpb.StoreID, rangeID roachpb.RangeID, diff --git a/pkg/storage/allocator_scorer.go b/pkg/storage/allocator_scorer.go index 7ae07b46333c..d88f582aed5d 100644 --- a/pkg/storage/allocator_scorer.go +++ b/pkg/storage/allocator_scorer.go @@ -123,23 +123,19 @@ type candidate struct { store roachpb.StoreDescriptor valid bool fullDisk bool + necessary bool diversityScore float64 - preferredScore int convergesScore int balanceScore balanceDimensions rangeCount int details string } -func (c candidate) localityScore() float64 { - return c.diversityScore + float64(c.preferredScore) -} - func (c candidate) String() string { - str := fmt.Sprintf("s%d, valid:%t, fulldisk:%t, locality:%.2f, converges:%d, balance:%s, "+ - "rangeCount:%d, logicalBytes:%s, writesPerSecond:%.2f", - c.store.StoreID, c.valid, c.fullDisk, c.localityScore(), c.convergesScore, c.balanceScore, - c.rangeCount, humanizeutil.IBytes(c.store.Capacity.LogicalBytes), + str := fmt.Sprintf("s%d, valid:%t, fulldisk:%t, necessary:%t, diversity:%.2f, converges:%d, "+ + "balance:%s, rangeCount:%d, logicalBytes:%s, writesPerSecond:%.2f", + c.store.StoreID, c.valid, c.fullDisk, c.necessary, c.diversityScore, c.convergesScore, + c.balanceScore, c.rangeCount, humanizeutil.IBytes(c.store.Capacity.LogicalBytes), c.store.Capacity.WritesPerSecond) if c.details != "" { return fmt.Sprintf("%s, details:(%s)", str, c.details) @@ -156,12 +152,12 @@ func (c candidate) compactString(options scorerOptions) string { if c.fullDisk { fmt.Fprintf(&buf, ", fullDisk:%t", c.fullDisk) } + if c.necessary { + fmt.Fprintf(&buf, ", necessary:%t", c.necessary) + } if c.diversityScore != 0 { fmt.Fprintf(&buf, ", diversity:%.2f", c.diversityScore) } - if c.preferredScore != 0 { - fmt.Fprintf(&buf, ", preferred:%d", c.preferredScore) - } fmt.Fprintf(&buf, ", converges:%d, balance:%s, rangeCount:%d", c.convergesScore, c.balanceScore.compactString(options), c.rangeCount) if options.statsBasedRebalancingEnabled { @@ -176,69 +172,55 @@ func (c candidate) compactString(options scorerOptions) string { // less returns true if o is a better fit for some range than c is. func (c candidate) less(o candidate) bool { - if !o.valid { - return false - } - if !c.valid { - return true - } - if o.fullDisk { - return false - } - if c.fullDisk { - return true - } - if c.localityScore() != o.localityScore() { - return c.localityScore() < o.localityScore() - } - if c.convergesScore != o.convergesScore { - return c.convergesScore < o.convergesScore - } - if c.balanceScore.totalScore() != o.balanceScore.totalScore() { - return c.balanceScore.totalScore() < o.balanceScore.totalScore() - } - return c.rangeCount > o.rangeCount + return c.compare(o) < 0 } -// worthRebalancingTo returns true if o is enough of a better fit for some -// range than c is that it's worth rebalancing from c to o. -func (c candidate) worthRebalancingTo(o candidate, options scorerOptions) bool { +// compare is analogous to strcmp in C or string::compare in C++ -- it returns +// a positive result if c is a better fit for the range than o, 0 if they're +// equivalent, or a negative result if o is a better fit than c. The magnitude +// of the result reflects some rough idea of how much better the better +// candidate is. 
+func (c candidate) compare(o candidate) float64 { if !o.valid { - return false + return 6 } if !c.valid { - return true + return -6 } if o.fullDisk { - return false + return 5 } if c.fullDisk { - return true + return -5 } - if c.localityScore() != o.localityScore() { - return c.localityScore() < o.localityScore() + if c.necessary != o.necessary { + if c.necessary { + return 4 + } + return -4 + } + if c.diversityScore != o.diversityScore { + if c.diversityScore > o.diversityScore { + return 3 + } + return -3 } if c.convergesScore != o.convergesScore { - return c.convergesScore < o.convergesScore + if c.convergesScore > o.convergesScore { + return 2 + float64(c.convergesScore-o.convergesScore)/10.0 + } + return -(2 + float64(o.convergesScore-c.convergesScore)/10.0) } - // You might intuitively think that we should require o's balanceScore to - // be considerably higher than c's balanceScore, but that will effectively - // rule out rebalancing in clusters where one locality is much larger or - // smaller than the others, since all the stores in that locality will tend - // to have either a maximal or minimal balanceScore. if c.balanceScore.totalScore() != o.balanceScore.totalScore() { - return c.balanceScore.totalScore() < o.balanceScore.totalScore() - } - // Instead, just require a gap between their number of ranges. This isn't - // great, particularly for stats-based rebalancing, but it only breaks - // balanceScore ties and it's a workable stop-gap on the way to something - // like #20751. - avgRangeCount := float64(c.rangeCount+o.rangeCount) / 2.0 - // Use an overfullThreshold that is at least a couple replicas larger than - // the average of the two, to ensure that we don't keep rebalancing back - // and forth between nodes that only differ by one or two replicas. - overfullThreshold := math.Max(overfullRangeThreshold(options, avgRangeCount), avgRangeCount+1.5) - return float64(c.rangeCount) > overfullThreshold + if c.balanceScore.totalScore() > o.balanceScore.totalScore() { + return 1 + (c.balanceScore.totalScore()-o.balanceScore.totalScore())/10.0 + } + return -(1 + (o.balanceScore.totalScore()-c.balanceScore.totalScore())/10.0) + } + if c.rangeCount < o.rangeCount { + return float64(o.rangeCount-c.rangeCount) / float64(o.rangeCount) + } + return -float64(c.rangeCount-o.rangeCount) / float64(c.rangeCount) } type candidateList []candidate @@ -287,10 +269,11 @@ var _ sort.Interface = byScoreAndID(nil) func (c byScoreAndID) Len() int { return len(c) } func (c byScoreAndID) Less(i, j int) bool { - if c[i].localityScore() == c[j].localityScore() && + if c[i].diversityScore == c[j].diversityScore && c[i].convergesScore == c[j].convergesScore && c[i].balanceScore.totalScore() == c[j].balanceScore.totalScore() && c[i].rangeCount == c[j].rangeCount && + c[i].necessary == c[j].necessary && c[i].fullDisk == c[j].fullDisk && c[i].valid == c[j].valid { return c[i].store.StoreID < c[j].store.StoreID @@ -318,8 +301,9 @@ func (cl candidateList) best() candidateList { return cl } for i := 1; i < len(cl); i++ { - if cl[i].localityScore() < cl[0].localityScore() || - (cl[i].localityScore() == cl[len(cl)-1].localityScore() && + if cl[i].necessary != cl[0].necessary || + cl[i].diversityScore < cl[0].diversityScore || + (cl[i].diversityScore == cl[len(cl)-1].diversityScore && cl[i].convergesScore < cl[len(cl)-1].convergesScore) { return cl[:i] } @@ -349,10 +333,11 @@ func (cl candidateList) worst() candidateList { } } } - // Find the worst constraint values. 
+ // Find the worst constraint/locality/converges values. for i := len(cl) - 2; i >= 0; i-- { - if cl[i].localityScore() > cl[len(cl)-1].localityScore() || - (cl[i].localityScore() == cl[len(cl)-1].localityScore() && + if cl[i].necessary != cl[len(cl)-1].necessary || + cl[i].diversityScore > cl[len(cl)-1].diversityScore || + (cl[i].diversityScore == cl[len(cl)-1].diversityScore && cl[i].convergesScore > cl[len(cl)-1].convergesScore) { return cl[i+1:] } @@ -431,7 +416,7 @@ func (cl candidateList) removeCandidate(c candidate) candidateList { // stores that meet the criteria are included in the list. func allocateCandidates( sl StoreList, - constraints config.Constraints, + constraints analyzedConstraints, existing []roachpb.ReplicaDescriptor, rangeInfo RangeInfo, existingNodeLocalities map[roachpb.NodeID]roachpb.Locality, @@ -442,8 +427,8 @@ func allocateCandidates( if !preexistingReplicaCheck(s.Node.NodeID, existing) { continue } - constraintsOk, preferredMatched := constraintCheck(s, constraints) - if !constraintsOk { + constraintsOK, necessary := allocateConstraintsCheck(s, constraints) + if !constraintsOK { continue } if !maxCapacityCheck(s) { @@ -453,9 +438,9 @@ func allocateCandidates( balanceScore := balanceScore(sl, s.Capacity, rangeInfo, options) candidates = append(candidates, candidate{ store: s, - valid: true, + valid: constraintsOK, + necessary: necessary, diversityScore: diversityScore, - preferredScore: preferredMatched, balanceScore: balanceScore, rangeCount: int(s.Capacity.RangeCount), }) @@ -473,19 +458,20 @@ func allocateCandidates( // marked as not valid, are in violation of a required criteria. func removeCandidates( sl StoreList, - constraints config.Constraints, + constraints analyzedConstraints, rangeInfo RangeInfo, existingNodeLocalities map[roachpb.NodeID]roachpb.Locality, options scorerOptions, ) candidateList { var candidates candidateList for _, s := range sl.stores { - constraintsOk, preferredMatched := constraintCheck(s, constraints) - if !constraintsOk { + constraintsOK, necessary := removeConstraintsCheck(s, constraints) + if !constraintsOK { candidates = append(candidates, candidate{ - store: s, - valid: false, - details: "constraint check fail", + store: s, + valid: false, + necessary: necessary, + details: "constraint check fail", }) continue } @@ -502,10 +488,10 @@ func removeCandidates( } candidates = append(candidates, candidate{ store: s, - valid: true, + valid: constraintsOK, + necessary: necessary, fullDisk: !maxCapacityCheck(s), diversityScore: diversityScore, - preferredScore: preferredMatched, convergesScore: convergesScore, balanceScore: balanceScore, rangeCount: int(s.Capacity.RangeCount), @@ -519,143 +505,323 @@ func removeCandidates( return candidates } +type rebalanceOptions struct { + existingCandidates candidateList + candidates candidateList +} + // rebalanceCandidates creates two candidate lists. The first contains all // existing replica's stores, ordered from least qualified for rebalancing to // most qualified. The second list is of all potential stores that could be // used as rebalancing receivers, ordered from best to worst. 
func rebalanceCandidates( ctx context.Context, - sl StoreList, - constraints config.Constraints, - existing []roachpb.ReplicaDescriptor, + allStores StoreList, + constraints analyzedConstraints, rangeInfo RangeInfo, existingNodeLocalities map[roachpb.NodeID]roachpb.Locality, + localityLookupFn func(roachpb.NodeID) string, options scorerOptions, -) (candidateList, candidateList) { - // Load the exiting storesIDs into a map to eliminate having to loop - // through the existing descriptors more than once. - existingStoreIDs := make(map[roachpb.StoreID]struct{}) - for _, repl := range existing { - existingStoreIDs[repl.StoreID] = struct{}{} +) []rebalanceOptions { + // 1. Determine whether existing replicas are valid and/or necessary. + type existingStore struct { + cand candidate + localityStr string + } + existingStores := make(map[roachpb.StoreID]existingStore) + var needRebalanceFrom bool + curDiversityScore := rangeDiversityScore(existingNodeLocalities) + for _, store := range allStores.stores { + for _, repl := range rangeInfo.Desc.Replicas { + if store.StoreID != repl.StoreID { + continue + } + valid, necessary := removeConstraintsCheck(store, constraints) + fullDisk := !maxCapacityCheck(store) + if !valid { + if !needRebalanceFrom { + log.VEventf(ctx, 2, "s%d: should-rebalance(invalid): locality:%q", + store.StoreID, store.Node.Locality) + } + needRebalanceFrom = true + } + if fullDisk { + if !needRebalanceFrom { + log.VEventf(ctx, 2, "s%d: should-rebalance(full-disk): capacity:%q", + store.StoreID, store.Capacity) + } + needRebalanceFrom = true + } + existingStores[store.StoreID] = existingStore{ + cand: candidate{ + store: store, + valid: valid, + necessary: necessary, + fullDisk: fullDisk, + diversityScore: curDiversityScore, + }, + localityStr: localityLookupFn(store.Node.NodeID), + } + } } - // Go through all the stores and find all that match the constraints so that - // we can have accurate stats for rebalance calculations. - var constraintsOkStoreDescriptors []roachpb.StoreDescriptor - - type constraintInfo struct { - ok bool - matched int - } - storeInfos := make(map[roachpb.StoreID]constraintInfo) - var rebalanceConstraintsCheck bool - for _, s := range sl.stores { - constraintsOk, preferredMatched := constraintCheck(s, constraints) - storeInfos[s.StoreID] = constraintInfo{ok: constraintsOk, matched: preferredMatched} - _, exists := existingStoreIDs[s.StoreID] - if constraintsOk { - constraintsOkStoreDescriptors = append(constraintsOkStoreDescriptors, s) - } else if exists { - rebalanceConstraintsCheck = true - log.VEventf(ctx, 2, "must rebalance from s%d due to constraint check", s.StoreID) + // 2. For each store, determine the stores that would be the best + // replacements on the basis of constraints, disk fullness, and diversity. + // Only the best should be included when computing balanceScores, since it + // isn't fair to compare the fullness of stores in a valid/necessary/diverse + // locality to those in an invalid/unnecessary/nondiverse locality (see + // #20751). Along the way, determine whether rebalance is needed to improve + // the range along these critical dimensions. + // + // This creates groups of stores that are valid to compare with each other. + // For example, if a range has a replica in localities A, B, and C, it's ok + // to compare other stores in locality A with the existing store in locality + // A, but would be bad for diversity if we were to compare them to the + // existing stores in localities B and C (see #20751 for more background). 
+ // + // NOTE: We can't just do this once per localityStr because constraints can + // also include node Attributes or store Attributes. We could try to group + // stores by attributes as well, but it's simplest to just run this for each + // store. + type comparableStoreList struct { + existing []roachpb.StoreDescriptor + sl StoreList + candidates candidateList + } + var comparableStores []comparableStoreList + var needRebalanceTo bool + for _, existing := range existingStores { + // If this store is equivalent in both Locality and Node/Store Attributes to + // some other existing store, then we can treat them the same. We have to + // include Node/Store Attributes because they affect constraints. + var matchedOtherExisting bool + for i, stores := range comparableStores { + if sameLocalityAndAttrs(stores.existing[0], existing.cand.store) { + comparableStores[i].existing = append(comparableStores[i].existing, existing.cand.store) + matchedOtherExisting = true + break + } + } + if matchedOtherExisting { + continue + } + var comparableCands candidateList + for _, store := range allStores.stores { + constraintsOK, necessary := rebalanceFromConstraintsCheck( + store, existing.cand.store.StoreID, constraints) + maxCapacityOK := maxCapacityCheck(store) + diversityScore := diversityRebalanceFromScore( + store, existing.cand.store.Node.NodeID, existingNodeLocalities) + cand := candidate{ + store: store, + valid: constraintsOK, + necessary: necessary, + fullDisk: !maxCapacityOK, + diversityScore: diversityScore, + } + if !cand.less(existing.cand) { + comparableCands = append(comparableCands, cand) + if !needRebalanceFrom && !needRebalanceTo && existing.cand.less(cand) { + needRebalanceTo = true + log.VEventf(ctx, 2, "s%d: should-rebalance(necessary/diversity=s%d): oldNecessary:%t, newNecessary:%t, oldDiversity:%f, newDiversity:%f, locality:%q", + existing.cand.store.StoreID, store.StoreID, existing.cand.necessary, cand.necessary, + existing.cand.diversityScore, cand.diversityScore, store.Node.Locality) + } + } + } + if options.deterministic { + sort.Sort(sort.Reverse(byScoreAndID(comparableCands))) + } else { + sort.Sort(sort.Reverse(byScore(comparableCands))) + } + bestCands := comparableCands.best() + bestStores := make([]roachpb.StoreDescriptor, len(bestCands)) + for i := range bestCands { + bestStores[i] = bestCands[i].store + } + comparableStores = append(comparableStores, comparableStoreList{ + existing: []roachpb.StoreDescriptor{existing.cand.store}, + sl: makeStoreList(bestStores), + candidates: bestCands, + }) } - constraintsOkStoreList := makeStoreList(constraintsOkStoreDescriptors) + // 3. Decide whether we should try to rebalance. Note that for each existing + // store, we only compare its fullness stats to the stats of "comparable" + // stores, i.e. those stores that are at least as valid, necessary, and diverse + // as the existing store. 
+ needRebalance := needRebalanceFrom || needRebalanceTo var shouldRebalanceCheck bool - if !rebalanceConstraintsCheck { - for _, store := range sl.stores { - if _, ok := existingStoreIDs[store.StoreID]; ok { - if shouldRebalance(ctx, store, constraintsOkStoreList, rangeInfo, existingNodeLocalities, options) { - shouldRebalanceCheck = true - break + if !needRebalance { + for _, existing := range existingStores { + var sl StoreList + outer: + for _, comparable := range comparableStores { + for _, existingCand := range comparable.existing { + if existing.cand.store.StoreID == existingCand.StoreID { + sl = comparable.sl + break outer + } } } + // TODO(a-robinson): Some moderate refactoring could extract this logic out + // into the loop below, avoiding duplicate balanceScore calculations. + if shouldRebalance(ctx, existing.cand.store, sl, rangeInfo, options) { + shouldRebalanceCheck = true + break + } } } - - // Only rebalance away if the constraints don't match or shouldRebalance - // indicated that we should consider moving the range away from one of its - // existing stores. - if !rebalanceConstraintsCheck && !shouldRebalanceCheck { - return nil, nil + if !needRebalance && !shouldRebalanceCheck { + return nil } - var existingCandidates candidateList - var candidates candidateList - for _, s := range sl.stores { - storeInfo := storeInfos[s.StoreID] - maxCapacityOK := maxCapacityCheck(s) - if _, ok := existingStoreIDs[s.StoreID]; ok { - if !storeInfo.ok { - existingCandidates = append(existingCandidates, candidate{ - store: s, - valid: false, - details: "constraint check fail", - }) + // 4. Create sets of rebalance options, i.e. groups of candidate stores and + // the existing replicas that they could legally replace in the range. We + // have to make a separate set of these for each group of comparableStores. + results := make([]rebalanceOptions, 0, len(comparableStores)) + for _, comparable := range comparableStores { + var existingCandidates candidateList + var candidates candidateList + for _, existingDesc := range comparable.existing { + existing, ok := existingStores[existingDesc.StoreID] + if !ok { + log.Errorf(ctx, "BUG: missing candidate for existing store %+v; stores: %+v", + existingDesc, existingStores) continue } - diversityScore := diversityRemovalScore(s.Node.NodeID, existingNodeLocalities) - balanceScore := balanceScore(sl, s.Capacity, rangeInfo, options) + if !existing.cand.valid { + existing.cand.details = "constraint check fail" + existingCandidates = append(existingCandidates, existing.cand) + continue + } + balanceScore := balanceScore(comparable.sl, existing.cand.store.Capacity, rangeInfo, options) var convergesScore int - if !rebalanceFromConvergesOnMean(sl, s.Capacity, rangeInfo, options) { + if !rebalanceFromConvergesOnMean(comparable.sl, existing.cand.store.Capacity, rangeInfo, options) { // Similarly to in removeCandidates, any replica whose removal // would not converge the range stats to their means is given a // constraint score boost of 1 to make it less attractive for // removal. 
convergesScore = 1 } - existingCandidates = append(existingCandidates, candidate{ - store: s, - valid: true, - fullDisk: !maxCapacityOK, - diversityScore: diversityScore, - preferredScore: storeInfo.matched, - convergesScore: convergesScore, - balanceScore: balanceScore, - rangeCount: int(s.Capacity.RangeCount), - }) - } else { - if !storeInfo.ok || !maxCapacityOK || !rebalanceToMaxCapacityCheck(s) { + existing.cand.convergesScore = convergesScore + existing.cand.balanceScore = balanceScore + existing.cand.rangeCount = int(existing.cand.store.Capacity.RangeCount) + existingCandidates = append(existingCandidates, existing.cand) + } + + for _, cand := range comparable.candidates { + // We handled the possible candidates for removal above. Don't process + // anymore here. + if _, ok := existingStores[cand.store.StoreID]; ok { continue } - balanceScore := balanceScore(sl, s.Capacity, rangeInfo, options) - var convergesScore int - if rebalanceToConvergesOnMean(sl, s.Capacity, rangeInfo, options) { + // We already computed valid, necessary, fullDisk, and diversityScore + // above, but recompute fullDisk using special rebalanceTo logic for + // rebalance candidates. + s := cand.store + cand.fullDisk = !rebalanceToMaxCapacityCheck(s) + cand.balanceScore = balanceScore(comparable.sl, s.Capacity, rangeInfo, options) + if rebalanceToConvergesOnMean(comparable.sl, s.Capacity, rangeInfo, options) { // This is the counterpart of !rebalanceFromConvergesOnMean from // the existing candidates. Candidates whose addition would // converge towards the range count mean are promoted. - convergesScore = 1 - } else if !rebalanceConstraintsCheck { - // Only consider this candidate if we must rebalance due to a - // constraint check requirements. + cand.convergesScore = 1 + } else if !needRebalance { + // Only consider this candidate if we must rebalance due to constraint, + // disk fullness, or diversity reasons. log.VEventf(ctx, 3, "not considering %+v as a candidate for range %+v: score=%s storeList=%+v", - s, rangeInfo, balanceScore, sl) + s, rangeInfo, cand.balanceScore, comparable.sl) continue } - diversityScore := diversityRebalanceScore(s, existingNodeLocalities) - candidates = append(candidates, candidate{ - store: s, - valid: true, - fullDisk: !maxCapacityOK, - diversityScore: diversityScore, - preferredScore: storeInfo.matched, - convergesScore: convergesScore, - balanceScore: balanceScore, - rangeCount: int(s.Capacity.RangeCount), - }) + cand.rangeCount = int(s.Capacity.RangeCount) + candidates = append(candidates, cand) } + + if len(existingCandidates) == 0 || len(candidates) == 0 { + continue + } + + if options.deterministic { + sort.Sort(sort.Reverse(byScoreAndID(existingCandidates))) + sort.Sort(sort.Reverse(byScoreAndID(candidates))) + } else { + sort.Sort(sort.Reverse(byScore(existingCandidates))) + sort.Sort(sort.Reverse(byScore(candidates))) + } + + // Only return candidates better than the worst existing replica. 
+ improvementCandidates := candidates.betterThan(existingCandidates[len(existingCandidates)-1]) + if len(improvementCandidates) == 0 { + continue + } + results = append(results, rebalanceOptions{ + existingCandidates: existingCandidates, + candidates: improvementCandidates, + }) + log.VEventf(ctx, 5, "rebalance candidates #%d: %s\nexisting replicas: %s", + len(results), results[len(results)-1].candidates, results[len(results)-1].existingCandidates) } - if options.deterministic { - sort.Sort(sort.Reverse(byScoreAndID(existingCandidates))) - sort.Sort(sort.Reverse(byScoreAndID(candidates))) - } else { - sort.Sort(sort.Reverse(byScore(existingCandidates))) - sort.Sort(sort.Reverse(byScore(candidates))) + return results +} + +// bestRebalanceTarget returns the best target to try to rebalance to out of +// the provided options, and removes it from the relevant candidate list. +// Also returns the existing replicas that the chosen candidate was compared to. +// Returns nil if there are no more targets worth rebalancing to. +func bestRebalanceTarget( + randGen allocatorRand, options []rebalanceOptions, +) (*candidate, candidateList) { + bestIdx := -1 + var bestTarget *candidate + var replaces candidate + for i, option := range options { + if len(option.candidates) == 0 { + continue + } + target := option.candidates.selectGood(randGen) + existing := option.existingCandidates[len(option.existingCandidates)-1] + if betterRebalanceTarget(target, &existing, bestTarget, &replaces) == target { + bestIdx = i + bestTarget = target + replaces = existing + } + } + if bestIdx == -1 { + return nil, nil } + // Copy the selected target out of the candidates slice before modifying + // the slice. Without this, the returned pointer likely will be pointing + // to a different candidate than intended due to movement within the slice. + copiedTarget := *bestTarget + options[bestIdx].candidates = options[bestIdx].candidates.removeCandidate(copiedTarget) + return &copiedTarget, options[bestIdx].existingCandidates +} - return existingCandidates, candidates +// betterRebalanceTarget returns whichever of target1 or target2 is a larger +// improvement over its corresponding existing replica that it will be +// replacing in the range. +func betterRebalanceTarget(target1, existing1, target2, existing2 *candidate) *candidate { + if target2 == nil { + return target1 + } + // Try to pick whichever target is a larger improvement over the replica that + // they'll replace. + comp1 := target1.compare(*existing1) + comp2 := target2.compare(*existing2) + if comp1 > comp2 { + return target1 + } + if comp1 < comp2 { + return target2 + } + // If the two targets are equally better than their corresponding existing + // replicas, just return whichever target is better. + if target1.less(*target2) { + return target2 + } + return target1 } // shouldRebalance returns whether the specified store is a candidate for @@ -665,30 +831,8 @@ func shouldRebalance( store roachpb.StoreDescriptor, sl StoreList, rangeInfo RangeInfo, - existingNodeLocalities map[roachpb.NodeID]roachpb.Locality, options scorerOptions, ) bool { - // Rebalance if this store is too full. 
- if !maxCapacityCheck(store) { - log.VEventf(ctx, 2, "s%d: should-rebalance(disk-full): fraction-used=%.2f, capacity=(%v)", - store.StoreID, store.Capacity.FractionUsed(), store.Capacity) - return true - } - - diversityScore := rangeDiversityScore(existingNodeLocalities) - for _, desc := range sl.stores { - if !preexistingReplicaCheck(desc.Node.NodeID, rangeInfo.Desc.Replicas) { - continue - } - otherScore := diversityRebalanceFromScore(desc, store.Node.NodeID, existingNodeLocalities) - if otherScore > diversityScore { - log.VEventf(ctx, 2, - "s%d: should-rebalance(better-diversity=s%d): diversityScore=%.5f, otherScore=%.5f", - store.StoreID, desc.StoreID, diversityScore, otherScore) - return true - } - } - if !options.statsBasedRebalancingEnabled { return shouldRebalanceNoStats(ctx, store, sl, options) } @@ -785,6 +929,19 @@ func preexistingReplicaCheck(nodeID roachpb.NodeID, existing []roachpb.ReplicaDe return true } +func sameLocalityAndAttrs(s1, s2 roachpb.StoreDescriptor) bool { + if !s1.Node.Locality.Equals(s2.Node.Locality) { + return false + } + if !s1.Node.Attrs.Equals(s2.Node.Attrs) { + return false + } + if !s1.Attrs.Equals(s2.Attrs) { + return false + } + return true +} + // storeHasConstraint returns whether a store's attributes or node's locality // matches the key value pair in the constraint. func storeHasConstraint(store roachpb.StoreDescriptor, c config.Constraint) bool { @@ -806,26 +963,191 @@ func storeHasConstraint(store roachpb.StoreDescriptor, c config.Constraint) bool return false } -// constraintCheck returns true iff all required and prohibited constraints are -// satisfied. Stores with attributes or localities that match the most positive -// constraints return higher scores. -func constraintCheck(store roachpb.StoreDescriptor, constraints config.Constraints) (bool, int) { - if len(constraints.Constraints) == 0 { - return true, 0 +type analyzedConstraints struct { + constraints []config.Constraints + // For each set of constraints in the above slice, track which StoreIDs + // satisfy them. This field is unused if there are no constraints. + satisfiedBy [][]roachpb.StoreID + // Maps from StoreID to the indices in the constraints slice of which + // constraints the store satisfies. This field is unused if there are no + // constraints. + satisfies map[roachpb.StoreID][]int +} + +// analyzeConstraints processes the zone config constraints that apply to a +// range along with the current replicas for a range, spitting back out +// information about which constraints are satisfied by which replicas and +// which replicas satisfy which constraints, aiding in allocation decisions. +func analyzeConstraints( + ctx context.Context, + getStoreDescFn func(roachpb.StoreID) (roachpb.StoreDescriptor, bool), + existing []roachpb.ReplicaDescriptor, + constraints []config.Constraints, +) analyzedConstraints { + result := analyzedConstraints{ + constraints: constraints, + } + + if len(constraints) > 0 { + result.satisfiedBy = make([][]roachpb.StoreID, len(constraints)) + result.satisfies = make(map[roachpb.StoreID][]int) + } + + for i, subConstraints := range constraints { + for _, repl := range existing { + // If for some reason we don't have the store descriptor (which shouldn't + // happen once a node is hooked into gossip), trust that it's valid. This + // is a much more stable failure state than frantically moving everything + // off such a node. 
+ store, ok := getStoreDescFn(repl.StoreID) + if !ok || subConstraintsCheck(store, subConstraints) { + result.satisfiedBy[i] = append(result.satisfiedBy[i], store.StoreID) + result.satisfies[store.StoreID] = append(result.satisfies[store.StoreID], i) + } + } } - positive := 0 + return result +} + +// allocateConstraintsCheck checks the potential allocation target store +// against all the constraints. If it matches a constraint at all, it's valid. +// If it matches a constraint that is not already fully satisfied by existing +// replicas, then it's necessary. +// +// NB: This assumes that the sum of all constraints.NumReplicas is equal to +// configured number of replicas for the range, or that there's just one set of +// constraints with NumReplicas set to 0. This is meant to be enforced in the +// config package. +func allocateConstraintsCheck( + store roachpb.StoreDescriptor, analyzed analyzedConstraints, +) (valid bool, necessary bool) { + // All stores are valid when there are no constraints. + if len(analyzed.constraints) == 0 { + return true, false + } + + for i, constraints := range analyzed.constraints { + if constraintsOK := subConstraintsCheck(store, constraints); constraintsOK { + valid = true + matchingStores := analyzed.satisfiedBy[i] + if len(matchingStores) < int(constraints.NumReplicas) { + return true, true + } + } + } + + return valid, false +} + +// removeConstraintsCheck checks the existing store against the analyzed +// constraints, determining whether it's valid (matches some constraint) and +// necessary (matches some constraint that no other existing replica matches). +// The difference between this and allocateConstraintsCheck is that this is to +// be used on an existing replica of the range, not a potential addition. +func removeConstraintsCheck( + store roachpb.StoreDescriptor, analyzed analyzedConstraints, +) (valid bool, necessary bool) { + // All stores are valid when there are no constraints. + if len(analyzed.constraints) == 0 { + return true, false + } + + // The store satisfies none of the constraints. + if len(analyzed.satisfies[store.StoreID]) == 0 { + return false, false + } + + // Check if the store matches a constraint that isn't overly satisfied. + // If so, then keeping it around is necessary to ensure that constraint stays + // fully satisfied. + for _, constraintIdx := range analyzed.satisfies[store.StoreID] { + if len(analyzed.satisfiedBy[constraintIdx]) <= int(analyzed.constraints[constraintIdx].NumReplicas) { + return true, true + } + } + + // If neither of the above is true, then the store is valid but nonessential. + // NOTE: We could be more precise here by trying to find the least essential + // existing replica and only considering that one nonessential, but this is + // sufficient to avoid violating constraints. + return true, false +} + +// rebalanceFromConstraintsCheck checks the potential rebalance target store +// against the analyzed constraints, determining whether it's valid and whether it +// will be necessary if fromStoreID (an existing replica) is removed from the +// range. +func rebalanceFromConstraintsCheck( + store roachpb.StoreDescriptor, fromStoreID roachpb.StoreID, analyzed analyzedConstraints, +) (valid bool, necessary bool) { + // All stores are valid when there are no constraints. + if len(analyzed.constraints) == 0 { + return true, false + } + + // Check the store against all the constraints. If it matches a constraint at + // all, it's valid.
If it matches a constraint that is not already fully + // satisfied by existing replicas or that is only fully satisfied because of + // fromStoreID, then it's necessary. + // + // NB: This assumes that the sum of all constraints.NumReplicas is equal to + // configured number of replicas for the range, or that there's just one set + // of constraints with NumReplicas set to 0. This is meant to be enforced in + // the config package. + for i, constraints := range analyzed.constraints { + if constraintsOK := subConstraintsCheck(store, constraints); constraintsOK { + valid = true + matchingStores := analyzed.satisfiedBy[i] + if len(matchingStores) < int(constraints.NumReplicas) || + (len(matchingStores) == int(constraints.NumReplicas) && + containsStore(analyzed.satisfiedBy[i], fromStoreID)) { + return true, true + } + } + } + + return valid, false +} + +// containsStore returns true if the list of StoreIDs contains the target. +func containsStore(stores []roachpb.StoreID, target roachpb.StoreID) bool { + for _, storeID := range stores { + if storeID == target { + return true + } + } + return false +} + +// constraintsCheck returns true iff the provided store would be a valid in a +// range with the provided constraints. +func constraintsCheck(store roachpb.StoreDescriptor, constraints []config.Constraints) bool { + if len(constraints) == 0 { + return true + } + + for _, subConstraints := range constraints { + if constraintsOK := subConstraintsCheck(store, subConstraints); constraintsOK { + return true + } + } + return false +} + +// subConstraintsCheck checks a store against a single set of constraints (out +// of the possibly numerous sets that apply to a range), returning true iff the +// store matches the constraints. +func subConstraintsCheck(store roachpb.StoreDescriptor, constraints config.Constraints) bool { for _, constraint := range constraints.Constraints { hasConstraint := storeHasConstraint(store, constraint) switch { case constraint.Type == config.Constraint_REQUIRED && !hasConstraint: - return false, 0 + return false case constraint.Type == config.Constraint_PROHIBITED && hasConstraint: - return false, 0 - case (constraint.Type == config.Constraint_POSITIVE && hasConstraint): - positive++ + return false } } - return true, positive + return true } // rangeDiversityScore returns a value between 0 and 1 based on how diverse the diff --git a/pkg/storage/allocator_scorer_test.go b/pkg/storage/allocator_scorer_test.go index ae04b51267b4..e092f4e644e1 100644 --- a/pkg/storage/allocator_scorer_test.go +++ b/pkg/storage/allocator_scorer_test.go @@ -425,82 +425,158 @@ var ( } ) -func TestConstraintCheck(t *testing.T) { +func getTestStoreDesc(storeID roachpb.StoreID) (roachpb.StoreDescriptor, bool) { + desc, ok := testStores[storeID] + return desc, ok +} + +func testStoreReplicas(storeIDs []roachpb.StoreID) []roachpb.ReplicaDescriptor { + var result []roachpb.ReplicaDescriptor + for _, storeID := range storeIDs { + result = append(result, roachpb.ReplicaDescriptor{ + NodeID: roachpb.NodeID(storeID), + StoreID: storeID, + }) + } + return result +} + +func TestConstraintsCheck(t *testing.T) { defer leaktest.AfterTest(t)() testCases := []struct { name string - constraints []config.Constraint - expected map[roachpb.StoreID]int + constraints []config.Constraints + expected map[roachpb.StoreID]bool }{ { name: "required constraint", - constraints: []config.Constraint{ - {Value: "b", Type: config.Constraint_REQUIRED}, + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + 
{Value: "b", Type: config.Constraint_REQUIRED}, + }, + }, }, - expected: map[roachpb.StoreID]int{ - testStoreUSa1: 0, - testStoreUSb: 0, + expected: map[roachpb.StoreID]bool{ + testStoreUSa1: true, + testStoreUSb: true, }, }, { name: "required locality constraints", - constraints: []config.Constraint{ - {Key: "datacenter", Value: "us", Type: config.Constraint_REQUIRED}, + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "us", Type: config.Constraint_REQUIRED}, + }, + }, }, - expected: map[roachpb.StoreID]int{ - testStoreUSa15: 0, - testStoreUSa15Dupe: 0, - testStoreUSa1: 0, - testStoreUSb: 0, + expected: map[roachpb.StoreID]bool{ + testStoreUSa15: true, + testStoreUSa15Dupe: true, + testStoreUSa1: true, + testStoreUSb: true, }, }, { name: "prohibited constraints", - constraints: []config.Constraint{ - {Value: "b", Type: config.Constraint_PROHIBITED}, + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_PROHIBITED}, + }, + }, }, - expected: map[roachpb.StoreID]int{ - testStoreUSa15: 0, - testStoreUSa15Dupe: 0, - testStoreEurope: 0, + expected: map[roachpb.StoreID]bool{ + testStoreUSa15: true, + testStoreUSa15Dupe: true, + testStoreEurope: true, }, }, { name: "prohibited locality constraints", - constraints: []config.Constraint{ - {Key: "datacenter", Value: "us", Type: config.Constraint_PROHIBITED}, + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "us", Type: config.Constraint_PROHIBITED}, + }, + }, }, - expected: map[roachpb.StoreID]int{ - testStoreEurope: 0, + expected: map[roachpb.StoreID]bool{ + testStoreEurope: true, }, }, { - name: "positive constraints", - constraints: []config.Constraint{ - {Value: "a"}, - {Value: "b"}, - {Value: "c"}, + name: "positive constraints are ignored", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "a", Type: config.Constraint_DEPRECATED_POSITIVE}, + {Value: "b", Type: config.Constraint_DEPRECATED_POSITIVE}, + {Value: "c", Type: config.Constraint_DEPRECATED_POSITIVE}, + }, + }, }, - expected: map[roachpb.StoreID]int{ - testStoreUSa15: 1, - testStoreUSa15Dupe: 1, - testStoreUSa1: 2, - testStoreUSb: 3, - testStoreEurope: 0, + expected: map[roachpb.StoreID]bool{ + testStoreUSa15: true, + testStoreUSa15Dupe: true, + testStoreUSa1: true, + testStoreUSb: true, + testStoreEurope: true, }, }, { - name: "positive locality constraints", - constraints: []config.Constraint{ - {Key: "datacenter", Value: "eur"}, + name: "positive locality constraints are ignored", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "eur", Type: config.Constraint_DEPRECATED_POSITIVE}, + }, + }, }, - expected: map[roachpb.StoreID]int{ - testStoreUSa15: 0, - testStoreUSa15Dupe: 0, - testStoreUSa1: 0, - testStoreUSb: 0, - testStoreEurope: 1, + expected: map[roachpb.StoreID]bool{ + testStoreUSa15: true, + testStoreUSa15Dupe: true, + testStoreUSa1: true, + testStoreUSb: true, + testStoreEurope: true, + }, + }, + { + name: "NumReplicas doesn't affect constraint checking", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "eur", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + }, + expected: map[roachpb.StoreID]bool{ + testStoreEurope: true, + }, + }, + { + name: "multiple per-replica constraints are respected", + constraints: []config.Constraints{ + { + 
Constraints: []config.Constraint{ + {Key: "datacenter", Value: "eur", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + }, + expected: map[roachpb.StoreID]bool{ + testStoreUSa1: true, + testStoreUSb: true, + testStoreEurope: true, }, }, } @@ -508,14 +584,288 @@ func TestConstraintCheck(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { for _, s := range testStores { - valid, positive := constraintCheck(s, config.Constraints{Constraints: tc.constraints}) - expectedPositive, ok := tc.expected[s.StoreID] + valid := constraintsCheck(s, tc.constraints) + ok := tc.expected[s.StoreID] if valid != ok { t.Errorf("expected store %d to be %t, but got %t", s.StoreID, ok, valid) continue } - if positive != expectedPositive { - t.Errorf("expected store %d to have %d positives, but got %d", s.StoreID, expectedPositive, positive) + } + }) + } +} + +func TestAllocateConstraintsCheck(t *testing.T) { + defer leaktest.AfterTest(t)() + + testCases := []struct { + name string + constraints []config.Constraints + existing []roachpb.StoreID + expectedValid map[roachpb.StoreID]bool + expectedNecessary map[roachpb.StoreID]bool + }{ + { + name: "prohibited constraint", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_PROHIBITED}, + }, + }, + }, + existing: nil, + expectedValid: map[roachpb.StoreID]bool{ + testStoreUSa15: true, + testStoreUSa15Dupe: true, + testStoreEurope: true, + }, + expectedNecessary: map[roachpb.StoreID]bool{}, + }, + { + name: "required constraint", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_REQUIRED}, + }, + }, + }, + existing: nil, + expectedValid: map[roachpb.StoreID]bool{ + testStoreUSa1: true, + testStoreUSb: true, + }, + expectedNecessary: map[roachpb.StoreID]bool{}, + }, + { + name: "required constraint with NumReplicas", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 3, + }, + }, + existing: nil, + expectedValid: map[roachpb.StoreID]bool{ + testStoreUSa1: true, + testStoreUSb: true, + }, + expectedNecessary: map[roachpb.StoreID]bool{ + testStoreUSa1: true, + testStoreUSb: true, + }, + }, + { + name: "multiple required constraints with NumReplicas", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "a", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + }, + existing: nil, + expectedValid: map[roachpb.StoreID]bool{ + testStoreUSa15: true, + testStoreUSa15Dupe: true, + testStoreUSa1: true, + testStoreUSb: true, + }, + expectedNecessary: map[roachpb.StoreID]bool{ + testStoreUSa15: true, + testStoreUSa15Dupe: true, + testStoreUSa1: true, + testStoreUSb: true, + }, + }, + { + name: "multiple required constraints with NumReplicas and existing replicas", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "a", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + }, + existing: []roachpb.StoreID{testStoreUSa1}, + expectedValid: map[roachpb.StoreID]bool{ + 
testStoreUSa15: true, + testStoreUSa15Dupe: true, + testStoreUSa1: true, + testStoreUSb: true, + }, + expectedNecessary: map[roachpb.StoreID]bool{}, + }, + { + name: "multiple required constraints with NumReplicas and not enough existing replicas", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "a", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 2, + }, + }, + existing: []roachpb.StoreID{testStoreUSa1}, + expectedValid: map[roachpb.StoreID]bool{ + testStoreUSa15: true, + testStoreUSa15Dupe: true, + testStoreUSa1: true, + testStoreUSb: true, + }, + expectedNecessary: map[roachpb.StoreID]bool{ + testStoreUSa1: true, + testStoreUSb: true, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + analyzed := analyzeConstraints( + context.Background(), getTestStoreDesc, testStoreReplicas(tc.existing), tc.constraints) + for _, s := range testStores { + valid, necessary := allocateConstraintsCheck(s, analyzed) + if e, a := tc.expectedValid[s.StoreID], valid; e != a { + t.Errorf("expected allocateConstraintsCheck(s%d).valid to be %t, but got %t", + s.StoreID, e, a) + } + if e, a := tc.expectedNecessary[s.StoreID], necessary; e != a { + t.Errorf("expected allocateConstraintsCheck(s%d).necessary to be %t, but got %t", + s.StoreID, e, a) + } + } + }) + } +} + +func TestRemoveConstraintsCheck(t *testing.T) { + defer leaktest.AfterTest(t)() + + type expected struct { + valid, necessary bool + } + testCases := []struct { + name string + constraints []config.Constraints + expected map[roachpb.StoreID]expected + }{ + { + name: "prohibited constraint", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_PROHIBITED}, + }, + }, + }, + expected: map[roachpb.StoreID]expected{ + testStoreUSa15: {true, false}, + testStoreUSa15Dupe: {true, false}, + testStoreEurope: {true, false}, + testStoreUSa1: {false, false}, + }, + }, + { + name: "required constraint", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_REQUIRED}, + }, + }, + }, + expected: map[roachpb.StoreID]expected{ + testStoreUSa15: {false, false}, + testStoreUSa15Dupe: {false, false}, + testStoreEurope: {false, false}, + testStoreUSa1: {true, false}, + }, + }, + { + name: "required constraint with NumReplicas", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 2, + }, + }, + expected: map[roachpb.StoreID]expected{ + testStoreUSa15: {false, false}, + testStoreEurope: {false, false}, + testStoreUSa1: {true, true}, + testStoreUSb: {true, true}, + }, + }, + { + name: "multiple required constraints with NumReplicas", + constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "a", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + { + Constraints: []config.Constraint{ + {Value: "b", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + }, + expected: map[roachpb.StoreID]expected{ + testStoreUSa15: {true, false}, + testStoreUSa1: {true, true}, + testStoreEurope: {false, false}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var existing []roachpb.ReplicaDescriptor + for storeID := range tc.expected { + existing = append(existing, 
roachpb.ReplicaDescriptor{ + NodeID: roachpb.NodeID(storeID), + StoreID: storeID, + }) + } + analyzed := analyzeConstraints( + context.Background(), getTestStoreDesc, existing, tc.constraints) + for storeID, expected := range tc.expected { + valid, necessary := removeConstraintsCheck(testStores[storeID], analyzed) + if e, a := expected.valid, valid; e != a { + t.Errorf("expected removeConstraintsCheck(s%d).valid to be %t, but got %t", + storeID, e, a) + } + if e, a := expected.necessary, necessary; e != a { + t.Errorf("expected removeConstraintsCheck(s%d).necessary to be %t, but got %t", + storeID, e, a) } } }) @@ -637,24 +987,45 @@ func TestShouldRebalanceDiversity(t *testing.T) { }, } for i, tc := range testCases { + removeStore := func(sl StoreList, nodeID roachpb.NodeID) StoreList { + for i, s := range sl.stores { + if s.Node.NodeID == nodeID { + return makeStoreList(append(sl.stores[:i], sl.stores[i+1:]...)) + } + } + return sl + } + filteredSL := tc.sl + filteredSL.stores = append([]roachpb.StoreDescriptor(nil), filteredSL.stores...) + existingNodeLocalities := make(map[roachpb.NodeID]roachpb.Locality) rangeInfo := RangeInfo{ Desc: &roachpb.RangeDescriptor{}, } - existingNodeLocalities := make(map[roachpb.NodeID]roachpb.Locality) for _, nodeID := range tc.existingNodeIDs { rangeInfo.Desc.Replicas = append(rangeInfo.Desc.Replicas, roachpb.ReplicaDescriptor{ - NodeID: nodeID, + NodeID: nodeID, + StoreID: roachpb.StoreID(nodeID), }) existingNodeLocalities[nodeID] = localityForNodeID(tc.sl, nodeID) + // For the sake of testing, remove all other existing stores from the + // store list to only test whether we want to remove the replica on tc.s. + if nodeID != tc.s.Node.NodeID { + filteredSL = removeStore(filteredSL, nodeID) + } } - actual := shouldRebalance( + + targets := rebalanceCandidates( context.Background(), - tc.s, - tc.sl, + filteredSL, + analyzedConstraints{}, rangeInfo, existingNodeLocalities, - options, - ) + func(nodeID roachpb.NodeID) string { + locality := localityForNodeID(tc.sl, nodeID) + return locality.String() + }, + options) + actual := len(targets) > 0 if actual != tc.expected { t.Errorf("%d: shouldRebalance on s%d with replicas on %v got %t, expected %t", i, tc.s.StoreID, tc.existingNodeIDs, actual, tc.expected) diff --git a/pkg/storage/allocator_test.go b/pkg/storage/allocator_test.go index 37319f37cb7e..d86473f99d51 100644 --- a/pkg/storage/allocator_test.go +++ b/pkg/storage/allocator_test.go @@ -54,17 +54,21 @@ var firstRangeInfo = testRangeInfo([]roachpb.ReplicaDescriptor{}, firstRange) var simpleZoneConfig = config.ZoneConfig{ NumReplicas: 1, - Constraints: config.Constraints{ - Constraints: []config.Constraint{ - {Value: "a"}, - {Value: "ssd"}, + Constraints: []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "a", Type: config.Constraint_REQUIRED}, + {Value: "ssd", Type: config.Constraint_REQUIRED}, + }, }, }, } var multiDCConfig = config.ZoneConfig{ NumReplicas: 2, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "ssd"}}}, + Constraints: []config.Constraints{ + {Constraints: []config.Constraint{{Value: "ssd", Type: config.Constraint_REQUIRED}}}, + }, } var singleStore = []*roachpb.StoreDescriptor{ @@ -183,8 +187,10 @@ var multiDCStores = []*roachpb.StoreDescriptor{ var multiDiversityDCStores = []*roachpb.StoreDescriptor{ { StoreID: 1, + Attrs: roachpb.Attributes{Attrs: []string{"ssd"}}, Node: roachpb.NodeDescriptor{ NodeID: 1, + Attrs: roachpb.Attributes{Attrs: []string{"odd"}}, Locality: roachpb.Locality{ 
Tiers: []roachpb.Tier{ {Key: "datacenter", Value: "a"}, @@ -194,8 +200,10 @@ var multiDiversityDCStores = []*roachpb.StoreDescriptor{ }, { StoreID: 2, + Attrs: roachpb.Attributes{Attrs: []string{"hdd"}}, Node: roachpb.NodeDescriptor{ NodeID: 2, + Attrs: roachpb.Attributes{Attrs: []string{"even"}}, Locality: roachpb.Locality{ Tiers: []roachpb.Tier{ {Key: "datacenter", Value: "a"}, @@ -205,8 +213,10 @@ var multiDiversityDCStores = []*roachpb.StoreDescriptor{ }, { StoreID: 3, + Attrs: roachpb.Attributes{Attrs: []string{"ssd"}}, Node: roachpb.NodeDescriptor{ NodeID: 3, + Attrs: roachpb.Attributes{Attrs: []string{"odd"}}, Locality: roachpb.Locality{ Tiers: []roachpb.Tier{ {Key: "datacenter", Value: "b"}, @@ -216,8 +226,10 @@ var multiDiversityDCStores = []*roachpb.StoreDescriptor{ }, { StoreID: 4, + Attrs: roachpb.Attributes{Attrs: []string{"hdd"}}, Node: roachpb.NodeDescriptor{ NodeID: 4, + Attrs: roachpb.Attributes{Attrs: []string{"even"}}, Locality: roachpb.Locality{ Tiers: []roachpb.Tier{ {Key: "datacenter", Value: "b"}, @@ -227,8 +239,10 @@ var multiDiversityDCStores = []*roachpb.StoreDescriptor{ }, { StoreID: 5, + Attrs: roachpb.Attributes{Attrs: []string{"ssd"}}, Node: roachpb.NodeDescriptor{ NodeID: 5, + Attrs: roachpb.Attributes{Attrs: []string{"odd"}}, Locality: roachpb.Locality{ Tiers: []roachpb.Tier{ {Key: "datacenter", Value: "c"}, @@ -238,8 +252,10 @@ var multiDiversityDCStores = []*roachpb.StoreDescriptor{ }, { StoreID: 6, + Attrs: roachpb.Attributes{Attrs: []string{"hdd"}}, Node: roachpb.NodeDescriptor{ NodeID: 6, + Attrs: roachpb.Attributes{Attrs: []string{"even"}}, Locality: roachpb.Locality{ Tiers: []roachpb.Tier{ {Key: "datacenter", Value: "c"}, @@ -249,8 +265,10 @@ var multiDiversityDCStores = []*roachpb.StoreDescriptor{ }, { StoreID: 7, + Attrs: roachpb.Attributes{Attrs: []string{"ssd"}}, Node: roachpb.NodeDescriptor{ NodeID: 7, + Attrs: roachpb.Attributes{Attrs: []string{"odd"}}, Locality: roachpb.Locality{ Tiers: []roachpb.Tier{ {Key: "datacenter", Value: "d"}, @@ -260,8 +278,10 @@ var multiDiversityDCStores = []*roachpb.StoreDescriptor{ }, { StoreID: 8, + Attrs: roachpb.Attributes{Attrs: []string{"hdd"}}, Node: roachpb.NodeDescriptor{ NodeID: 8, + Attrs: roachpb.Attributes{Attrs: []string{"even"}}, Locality: roachpb.Locality{ Tiers: []roachpb.Tier{ {Key: "datacenter", Value: "d"}, @@ -408,7 +428,7 @@ func TestAllocatorCorruptReplica(t *testing.T) { t.Fatal(err) } if result.Node.NodeID != 2 || result.StoreID != 2 { - t.Errorf("expected NodeID 2 and StoreID 2: %+v", result) + t.Errorf("expected NodeID 2 and StoreID 2; got %+v", result) } } @@ -497,10 +517,12 @@ func TestAllocatorExistingReplica(t *testing.T) { gossiputil.NewStoreGossiper(g).GossipStores(sameDCStores, t) result, _, err := a.AllocateTarget( context.Background(), - config.Constraints{ - Constraints: []config.Constraint{ - {Value: "a"}, - {Value: "hdd"}, + []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "a", Type: config.Constraint_REQUIRED}, + {Value: "hdd", Type: config.Constraint_REQUIRED}, + }, }, }, []roachpb.ReplicaDescriptor{ @@ -581,9 +603,9 @@ func TestAllocatorRebalance(t *testing.T) { for i := 0; i < 10; i++ { result, _ := a.RebalanceTarget( ctx, - config.Constraints{}, + nil, /* constraints */ nil, - testRangeInfo([]roachpb.ReplicaDescriptor{{StoreID: 3}}, firstRange), + testRangeInfo([]roachpb.ReplicaDescriptor{{NodeID: 3, StoreID: 3}}, firstRange), storeFilterThrottled, false, ) @@ -605,7 +627,7 @@ func TestAllocatorRebalance(t *testing.T) { t.Fatalf("%d: unable to get 
store %d descriptor", i, store.StoreID) } sl, _, _ := a.storePool.getStoreList(firstRange, storeFilterThrottled) - result := shouldRebalance(ctx, desc, sl, firstRangeInfo, nil, a.scorerOptions(false)) + result := shouldRebalance(ctx, desc, sl, firstRangeInfo, a.scorerOptions(false)) if expResult := (i >= 2); expResult != result { t.Errorf("%d: expected rebalance %t; got %t; desc %+v; sl: %+v", i, expResult, result, desc, sl) } @@ -736,7 +758,7 @@ func TestAllocatorRebalanceTarget(t *testing.T) { for i := 0; i < 10; i++ { result, details := a.RebalanceTarget( context.Background(), - config.Constraints{}, + nil, /* constraints */ status, rangeInfo, storeFilterThrottled, @@ -756,7 +778,7 @@ func TestAllocatorRebalanceTarget(t *testing.T) { for i := 0; i < 10; i++ { result, details := a.RebalanceTarget( context.Background(), - config.Constraints{}, + nil, /* constraints */ status, rangeInfo, storeFilterThrottled, @@ -768,20 +790,20 @@ func TestAllocatorRebalanceTarget(t *testing.T) { } // Make sure rebalancing does happen if we drop just a little further down. - stores[1].Capacity.RangeCount = 45 + stores[1].Capacity.RangeCount = 44 sg.GossipStores(stores, t) for i := 0; i < 10; i++ { result, details := a.RebalanceTarget( context.Background(), - config.Constraints{}, + nil, /* constraints */ status, rangeInfo, storeFilterThrottled, false, ) if result == nil || result.StoreID != stores[1].StoreID { - t.Fatalf("expected rebalance to s%d, but got %v; details: %s", - stores[1].StoreID, result, details) + t.Fatalf("%d: expected rebalance to s%d, but got %v; details: %s", + i, stores[1].StoreID, result, details) } } } @@ -828,6 +850,7 @@ func TestAllocatorRebalanceDeadNodes(t *testing.T) { replicas := func(storeIDs ...roachpb.StoreID) []roachpb.ReplicaDescriptor { res := make([]roachpb.ReplicaDescriptor, len(storeIDs)) for i, storeID := range storeIDs { + res[i].NodeID = roachpb.NodeID(storeID) res[i].StoreID = storeID } return res @@ -858,7 +881,7 @@ func TestAllocatorRebalanceDeadNodes(t *testing.T) { for _, c := range testCases { t.Run("", func(t *testing.T) { result, _ := a.RebalanceTarget( - ctx, config.Constraints{}, nil, testRangeInfo(c.existing, firstRange), storeFilterThrottled, false) + ctx, nil, nil, testRangeInfo(c.existing, firstRange), storeFilterThrottled, false) if c.expected > 0 { if result == nil { t.Fatalf("expected %d, but found nil", c.expected) @@ -1001,7 +1024,7 @@ func TestAllocatorRebalanceThrashing(t *testing.T) { if !ok { t.Fatalf("[store %d]: unable to get store %d descriptor", j, store.StoreID) } - if a, e := shouldRebalance(context.Background(), desc, sl, firstRangeInfo, nil, a.scorerOptions(false)), cluster[j].shouldRebalanceFrom; a != e { + if a, e := shouldRebalance(context.Background(), desc, sl, firstRangeInfo, a.scorerOptions(false)), cluster[j].shouldRebalanceFrom; a != e { t.Errorf("[store %d]: shouldRebalance %t != expected %t", store.StoreID, a, e) } } @@ -1052,7 +1075,7 @@ func TestAllocatorRebalanceByCount(t *testing.T) { for i := 0; i < 10; i++ { result, _ := a.RebalanceTarget( ctx, - config.Constraints{}, + nil, /* constraints */ nil, testRangeInfo([]roachpb.ReplicaDescriptor{{StoreID: stores[0].StoreID}}, firstRange), storeFilterThrottled, @@ -1070,7 +1093,7 @@ func TestAllocatorRebalanceByCount(t *testing.T) { t.Fatalf("%d: unable to get store %d descriptor", i, store.StoreID) } sl, _, _ := a.storePool.getStoreList(firstRange, storeFilterThrottled) - result := shouldRebalance(ctx, desc, sl, firstRangeInfo, nil, a.scorerOptions(false)) + result := 
shouldRebalance(ctx, desc, sl, firstRangeInfo, a.scorerOptions(false)) if expResult := (i < 3); expResult != result { t.Errorf("%d: expected rebalance %t; got %t", i, expResult, result) } @@ -1124,7 +1147,7 @@ func TestAllocatorTransferLeaseTarget(t *testing.T) { t.Run("", func(t *testing.T) { target := a.TransferLeaseTarget( context.Background(), - config.Constraints{}, + nil, /* constraints */ c.existing, c.leaseholder, 0, @@ -1178,7 +1201,7 @@ func TestAllocatorTransferLeaseTargetMultiStore(t *testing.T) { t.Run("", func(t *testing.T) { target := a.TransferLeaseTarget( context.Background(), - config.Constraints{}, + nil, /* constraints */ existing, c.leaseholder, 0, @@ -1241,7 +1264,7 @@ func TestAllocatorShouldTransferLease(t *testing.T) { t.Run("", func(t *testing.T) { result := a.ShouldTransferLease( context.Background(), - config.Constraints{}, + nil, /* constraints */ c.existing, c.leaseholder, 0, @@ -1292,241 +1315,1476 @@ func TestAllocatorRemoveTargetLocality(t *testing.T) { []roachpb.StoreID{1, 3, 4, 6, 7, 8}, []roachpb.StoreID{3, 4, 7, 8}, }, - } - for _, c := range testCases { - existingRepls := make([]roachpb.ReplicaDescriptor, len(c.existing)) - for i, storeID := range c.existing { - existingRepls[i] = roachpb.ReplicaDescriptor{ - NodeID: roachpb.NodeID(storeID), - StoreID: storeID, - } - } - targetRepl, details, err := a.RemoveTarget( - context.Background(), - config.Constraints{}, - existingRepls, - testRangeInfo(existingRepls, firstRange), - false, - ) - if err != nil { - t.Fatal(err) - } - var found bool - for _, storeID := range c.expected { - if targetRepl.StoreID == storeID { - found = true - break - } - } - if !found { - t.Errorf("expected RemoveTarget(%v) in %v, but got %d; details: %s", c.existing, c.expected, targetRepl.StoreID, details) - } - } -} - -func TestAllocatorAllocateTargetLocality(t *testing.T) { - defer leaktest.AfterTest(t)() - - stopper, g, _, a, _ := createTestAllocator( /* deterministic */ false) - defer stopper.Stop(context.Background()) - sg := gossiputil.NewStoreGossiper(g) - sg.GossipStores(multiDiversityDCStores, t) - - // Given a set of existing replicas for a range, rank which of the remaining - // stores from multiDiversityDCStores would be the best addition to the range - // purely on the basis of locality diversity. 
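To make the diversity-only ranking these cases encode easier to follow, here is a minimal, self-contained sketch of the idea. It is not the allocator's actual scoring code; `diversityScore` and the plain string-slice localities are illustrative stand-ins.

```go
package main

import "fmt"

// diversityScore is an illustrative measure (not CockroachDB's actual
// implementation) of how different two localities are: localities that
// diverge at an earlier tier score higher, and identical localities score 0.
func diversityScore(a, b []string) float64 {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if a[i] != b[i] {
			return float64(n-i) / float64(n)
		}
	}
	return 0
}

func main() {
	// Datacenters of the existing replicas, as single-tier localities.
	existing := [][]string{{"a"}, {"a"}, {"b"}}
	for _, cand := range []string{"a", "b", "c"} {
		sum := 0.0
		for _, repl := range existing {
			sum += diversityScore([]string{cand}, repl)
		}
		// The candidate with the highest aggregate diversity ("c" here) is
		// the best purely-diversity-based addition.
		fmt.Printf("candidate datacenter %s: diversity sum %.2f\n", cand, sum)
	}
}
```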
- testCases := []struct { - existing []roachpb.StoreID - expected []roachpb.StoreID - }{ + } + for _, c := range testCases { + existingRepls := make([]roachpb.ReplicaDescriptor, len(c.existing)) + for i, storeID := range c.existing { + existingRepls[i] = roachpb.ReplicaDescriptor{ + NodeID: roachpb.NodeID(storeID), + StoreID: storeID, + } + } + targetRepl, details, err := a.RemoveTarget( + context.Background(), + nil, /* constraints */ + existingRepls, + testRangeInfo(existingRepls, firstRange), + false, + ) + if err != nil { + t.Fatal(err) + } + var found bool + for _, storeID := range c.expected { + if targetRepl.StoreID == storeID { + found = true + break + } + } + if !found { + t.Errorf("expected RemoveTarget(%v) in %v, but got %d; details: %s", c.existing, c.expected, targetRepl.StoreID, details) + } + } +} + +func TestAllocatorAllocateTargetLocality(t *testing.T) { + defer leaktest.AfterTest(t)() + + stopper, g, _, a, _ := createTestAllocator( /* deterministic */ false) + defer stopper.Stop(context.Background()) + sg := gossiputil.NewStoreGossiper(g) + sg.GossipStores(multiDiversityDCStores, t) + + // Given a set of existing replicas for a range, rank which of the remaining + // stores from multiDiversityDCStores would be the best addition to the range + // purely on the basis of locality diversity. + testCases := []struct { + existing []roachpb.StoreID + expected []roachpb.StoreID + }{ + { + []roachpb.StoreID{1, 2, 3}, + []roachpb.StoreID{5, 6, 7, 8}, + }, + { + []roachpb.StoreID{1, 3, 4}, + []roachpb.StoreID{5, 6, 7, 8}, + }, + { + []roachpb.StoreID{3, 4, 5}, + []roachpb.StoreID{1, 2, 7, 8}, + }, + { + []roachpb.StoreID{1, 7, 8}, + []roachpb.StoreID{3, 4, 5, 6}, + }, + { + []roachpb.StoreID{5, 7, 8}, + []roachpb.StoreID{1, 2, 3, 4}, + }, + { + []roachpb.StoreID{1, 3, 5}, + []roachpb.StoreID{7, 8}, + }, + { + []roachpb.StoreID{1, 3, 7}, + []roachpb.StoreID{5, 6}, + }, + { + []roachpb.StoreID{1, 5, 7}, + []roachpb.StoreID{3, 4}, + }, + { + []roachpb.StoreID{3, 5, 7}, + []roachpb.StoreID{1, 2}, + }, + } + + for _, c := range testCases { + existingRepls := make([]roachpb.ReplicaDescriptor, len(c.existing)) + for i, storeID := range c.existing { + existingRepls[i] = roachpb.ReplicaDescriptor{ + NodeID: roachpb.NodeID(storeID), + StoreID: storeID, + } + } + targetStore, details, err := a.AllocateTarget( + context.Background(), + nil, /* constraints */ + existingRepls, + testRangeInfo(existingRepls, firstRange), + false, + ) + if err != nil { + t.Fatal(err) + } + var found bool + for _, storeID := range c.expected { + if targetStore.StoreID == storeID { + found = true + break + } + } + if !found { + t.Errorf("expected AllocateTarget(%v) in %v, but got %d; details: %s", c.existing, c.expected, targetStore.StoreID, details) + } + } +} + +func TestAllocatorRebalanceTargetLocality(t *testing.T) { + defer leaktest.AfterTest(t)() + + stopper, g, _, a, _ := createTestAllocator( /* deterministic */ false) + defer stopper.Stop(context.Background()) + + stores := []*roachpb.StoreDescriptor{ + { + StoreID: 1, + Node: multiDiversityDCStores[0].Node, + Capacity: roachpb.StoreCapacity{RangeCount: 10}, + }, + { + StoreID: 2, + Node: multiDiversityDCStores[1].Node, + Capacity: roachpb.StoreCapacity{RangeCount: 20}, + }, + { + StoreID: 3, + Node: multiDiversityDCStores[2].Node, + Capacity: roachpb.StoreCapacity{RangeCount: 10}, + }, + { + StoreID: 4, + Node: multiDiversityDCStores[3].Node, + Capacity: roachpb.StoreCapacity{RangeCount: 20}, + }, + { + StoreID: 5, + Node: multiDiversityDCStores[4].Node, + 
Capacity: roachpb.StoreCapacity{RangeCount: 10}, + }, + { + StoreID: 6, + Node: multiDiversityDCStores[5].Node, + Capacity: roachpb.StoreCapacity{RangeCount: 20}, + }, + } + sg := gossiputil.NewStoreGossiper(g) + sg.GossipStores(stores, t) + + testCases := []struct { + existing []roachpb.StoreID + expected []roachpb.StoreID + }{ + { + []roachpb.StoreID{1, 2, 3}, + []roachpb.StoreID{5}, + }, + { + []roachpb.StoreID{1, 3, 4}, + []roachpb.StoreID{5}, + }, + { + []roachpb.StoreID{1, 3, 6}, + []roachpb.StoreID{5}, + }, + { + []roachpb.StoreID{1, 2, 5}, + []roachpb.StoreID{3}, + }, + { + []roachpb.StoreID{1, 2, 6}, + []roachpb.StoreID{3}, + }, + { + []roachpb.StoreID{1, 4, 5}, + []roachpb.StoreID{3}, + }, + { + []roachpb.StoreID{1, 4, 6}, + []roachpb.StoreID{3, 5}, + }, + { + []roachpb.StoreID{3, 4, 5}, + []roachpb.StoreID{1}, + }, + { + []roachpb.StoreID{3, 4, 6}, + []roachpb.StoreID{1}, + }, + { + []roachpb.StoreID{4, 5, 6}, + []roachpb.StoreID{1}, + }, + { + []roachpb.StoreID{2, 4, 6}, + []roachpb.StoreID{1, 3, 5}, + }, + } + + for i, c := range testCases { + existingRepls := make([]roachpb.ReplicaDescriptor, len(c.existing)) + for i, storeID := range c.existing { + existingRepls[i] = roachpb.ReplicaDescriptor{ + NodeID: roachpb.NodeID(storeID), + StoreID: storeID, + } + } + targetStore, details := a.RebalanceTarget( + context.Background(), + nil, /* constraints */ + nil, + testRangeInfo(existingRepls, firstRange), + storeFilterThrottled, + false, + ) + if targetStore == nil { + t.Fatalf("%d: RebalanceTarget(%v) returned no target store; details: %s", i, c.existing, details) + } + var found bool + for _, storeID := range c.expected { + if targetStore.StoreID == storeID { + found = true + break + } + } + if !found { + t.Errorf("%d: expected RebalanceTarget(%v) in %v, but got %d; details: %s", + i, c.existing, c.expected, targetStore.StoreID, details) + } + } +} + +var ( + threeSpecificLocalities = []config.Constraints{ + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "a", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "b", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "c", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + } + + twoAndOneLocalities = []config.Constraints{ + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "a", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 2, + }, + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "b", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + } + + threeInOneLocality = []config.Constraints{ + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "a", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 3, + }, + } + + twoAndOneNodeAttrs = []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "ssd", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 2, + }, + { + Constraints: []config.Constraint{ + {Value: "hdd", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + } + + twoAndOneStoreAttrs = []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "odd", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 2, + }, + { + Constraints: []config.Constraint{ + {Value: "even", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + } + + mixLocalityAndAttrs = []config.Constraints{ + { + Constraints: []config.Constraint{ + 
{Key: "datacenter", Value: "a", Type: config.Constraint_REQUIRED}, + {Value: "ssd", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + { + Constraints: []config.Constraint{ + {Key: "datacenter", Value: "b", Type: config.Constraint_REQUIRED}, + {Value: "odd", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + { + Constraints: []config.Constraint{ + {Value: "even", Type: config.Constraint_REQUIRED}, + }, + NumReplicas: 1, + }, + } +) + +func TestAllocateCandidatesNumReplicasConstraints(t *testing.T) { + defer leaktest.AfterTest(t)() + + stopper, g, _, a, _ := createTestAllocator( /* deterministic */ false) + defer stopper.Stop(context.Background()) + sg := gossiputil.NewStoreGossiper(g) + sg.GossipStores(multiDiversityDCStores, t) + sl, _, _ := a.storePool.getStoreList(firstRange, storeFilterThrottled) + + // Given a set of existing replicas for a range, rank which of the remaining + // stores from multiDiversityDCStores would be the best addition to the range + // purely on the basis of constraint satisfaction and locality diversity. + testCases := []struct { + constraints []config.Constraints + existing []roachpb.StoreID + expected []roachpb.StoreID + }{ + { + threeSpecificLocalities, + []roachpb.StoreID{}, + []roachpb.StoreID{1, 2, 3, 4, 5, 6}, + }, + { + threeSpecificLocalities, + []roachpb.StoreID{7, 8}, + []roachpb.StoreID{1, 2, 3, 4, 5, 6}, + }, + { + threeSpecificLocalities, + []roachpb.StoreID{1}, + []roachpb.StoreID{3, 4, 5, 6}, + }, + { + threeSpecificLocalities, + []roachpb.StoreID{3, 5}, + []roachpb.StoreID{1, 2}, + }, + { + threeSpecificLocalities, + []roachpb.StoreID{3, 4}, + []roachpb.StoreID{1, 2, 5, 6}, + }, + { + threeSpecificLocalities, + []roachpb.StoreID{1, 3, 5}, + []roachpb.StoreID{2, 4, 6}, + }, + { + twoAndOneLocalities, + []roachpb.StoreID{}, + []roachpb.StoreID{1, 2, 3, 4}, + }, + { + twoAndOneLocalities, + []roachpb.StoreID{1}, + []roachpb.StoreID{3, 4}, // 2 isn't included because its diversity is worse + }, + { + twoAndOneLocalities, + []roachpb.StoreID{1, 2}, + []roachpb.StoreID{3, 4}, + }, + { + twoAndOneLocalities, + []roachpb.StoreID{1, 2, 3}, + []roachpb.StoreID{4}, + }, + { + twoAndOneLocalities, + []roachpb.StoreID{3}, + []roachpb.StoreID{1, 2}, + }, + { + twoAndOneLocalities, + []roachpb.StoreID{5}, + []roachpb.StoreID{1, 2, 3, 4}, + }, + { + threeInOneLocality, + []roachpb.StoreID{}, + []roachpb.StoreID{1, 2}, + }, + { + threeInOneLocality, + []roachpb.StoreID{3, 4, 5}, + []roachpb.StoreID{1, 2}, + }, + { + threeInOneLocality, + []roachpb.StoreID{1}, + []roachpb.StoreID{2}, + }, + { + threeInOneLocality, + []roachpb.StoreID{1, 2}, + []roachpb.StoreID{}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{}, + []roachpb.StoreID{1, 2, 3, 4, 5, 6, 7, 8}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{2}, + []roachpb.StoreID{3, 5, 7}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{1}, + []roachpb.StoreID{3, 4, 5, 6, 7, 8}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{1, 2}, + []roachpb.StoreID{3, 5, 7}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{1, 3}, + []roachpb.StoreID{6, 8}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{1, 3, 6}, + []roachpb.StoreID{7, 8}, + }, + { + twoAndOneStoreAttrs, + []roachpb.StoreID{}, + []roachpb.StoreID{1, 2, 3, 4, 5, 6, 7, 8}, + }, + { + twoAndOneStoreAttrs, + []roachpb.StoreID{2}, + []roachpb.StoreID{3, 5, 7}, + }, + { + twoAndOneStoreAttrs, + []roachpb.StoreID{1}, + []roachpb.StoreID{3, 4, 5, 6, 7, 8}, + }, + { + twoAndOneStoreAttrs, + []roachpb.StoreID{1, 2}, + 
[]roachpb.StoreID{3, 5, 7}, + }, + { + twoAndOneStoreAttrs, + []roachpb.StoreID{1, 3}, + []roachpb.StoreID{6, 8}, + }, + { + twoAndOneStoreAttrs, + []roachpb.StoreID{1, 3, 6}, + []roachpb.StoreID{7, 8}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{}, + []roachpb.StoreID{1, 2, 3, 4, 6, 8}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{1}, + []roachpb.StoreID{3, 4, 6, 8}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{2}, + []roachpb.StoreID{3}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{3}, + []roachpb.StoreID{1, 2, 6, 8}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{2, 3}, + []roachpb.StoreID{1}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{1, 2}, + []roachpb.StoreID{3}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{1, 3}, + []roachpb.StoreID{6, 8}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{1, 2, 3}, + []roachpb.StoreID{6, 8}, + }, + } + + for testIdx, tc := range testCases { + existingRepls := make([]roachpb.ReplicaDescriptor, len(tc.existing)) + for i, storeID := range tc.existing { + existingRepls[i] = roachpb.ReplicaDescriptor{ + NodeID: roachpb.NodeID(storeID), + StoreID: storeID, + } + } + rangeInfo := testRangeInfo(existingRepls, firstRange) + analyzed := analyzeConstraints( + context.Background(), a.storePool.getStoreDescriptor, rangeInfo.Desc.Replicas, tc.constraints) + candidates := allocateCandidates( + sl, + analyzed, + existingRepls, + rangeInfo, + a.storePool.getLocalities(existingRepls), + a.scorerOptions(false), + ) + best := candidates.best() + match := true + if len(tc.expected) != len(best) { + match = false + } else { + sort.Slice(best, func(i, j int) bool { + return best[i].store.StoreID < best[j].store.StoreID + }) + for i := range tc.expected { + if tc.expected[i] != best[i].store.StoreID { + match = false + break + } + } + } + if !match { + t.Errorf("%d: expected allocateCandidates(%v) = %v, but got %v", + testIdx, tc.existing, tc.expected, candidates) + } + } +} + +func TestRemoveCandidatesNumReplicasConstraints(t *testing.T) { + defer leaktest.AfterTest(t)() + + stopper, g, _, a, _ := createTestAllocator( /* deterministic */ false) + defer stopper.Stop(context.Background()) + sg := gossiputil.NewStoreGossiper(g) + sg.GossipStores(multiDiversityDCStores, t) + + // Given a set of existing replicas for a range, rank which of the remaining + // stores would be best to remove if we had to remove one purely on the basis + // of constraint-matching and locality diversity. 
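For readers skimming the expectations that follow, a minimal sketch of the ordering idea: replicas that fail their constraints are removal candidates before valid ones, and ties among valid replicas are broken by how little locality diversity each contributes. The `candidate` struct, its fields, and the scores below are made up for illustration; the allocator's real comparator tracks more state.

```go
package main

import (
	"fmt"
	"sort"
)

// candidate is a made-up, simplified view of a removal candidate.
type candidate struct {
	storeID   int
	valid     bool    // still needed to satisfy a required constraint group
	diversity float64 // locality diversity this replica contributes
}

func main() {
	// Mirrors the threeSpecificLocalities case with replicas on stores 1, 2,
	// and 3: stores 1 and 2 are both in datacenter "a", so only one of them is
	// needed for the "+datacenter=a" group, while store 3 alone covers
	// "+datacenter=b". The diversity numbers are invented for the example.
	cands := []candidate{
		{storeID: 1, valid: true, diversity: 0.5},
		{storeID: 2, valid: true, diversity: 0.5},
		{storeID: 3, valid: true, diversity: 1.0},
	}
	// Order worst-first: invalid stores first, then the least diverse ones.
	sort.Slice(cands, func(i, j int) bool {
		if cands[i].valid != cands[j].valid {
			return !cands[i].valid
		}
		return cands[i].diversity < cands[j].diversity
	})
	fmt.Printf("removal candidates, worst first: %+v\n", cands)
	// Stores 1 and 2 tie for worst, matching the expected set {1, 2} in the
	// threeSpecificLocalities case below.
}
```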
+ testCases := []struct { + constraints []config.Constraints + existing []roachpb.StoreID + expected []roachpb.StoreID + }{ + { + threeSpecificLocalities, + []roachpb.StoreID{1}, + []roachpb.StoreID{1}, + }, + { + threeSpecificLocalities, + []roachpb.StoreID{1, 2}, + []roachpb.StoreID{1, 2}, + }, + { + threeSpecificLocalities, + []roachpb.StoreID{1, 3}, + []roachpb.StoreID{1, 3}, + }, + { + threeSpecificLocalities, + []roachpb.StoreID{1, 2, 3}, + []roachpb.StoreID{1, 2}, + }, + { + threeSpecificLocalities, + []roachpb.StoreID{1, 3, 5}, + []roachpb.StoreID{1, 3, 5}, + }, + { + threeSpecificLocalities, + []roachpb.StoreID{1, 3, 7}, + []roachpb.StoreID{7}, + }, + { + twoAndOneLocalities, + []roachpb.StoreID{1, 3}, + []roachpb.StoreID{1, 3}, + }, + { + twoAndOneLocalities, + []roachpb.StoreID{1, 3, 5}, + []roachpb.StoreID{5}, + }, + { + twoAndOneLocalities, + []roachpb.StoreID{1, 3, 4}, + []roachpb.StoreID{3, 4}, + }, + { + twoAndOneLocalities, + []roachpb.StoreID{1, 2, 3}, + []roachpb.StoreID{1, 2}, + }, + { + threeInOneLocality, + []roachpb.StoreID{1, 3}, + []roachpb.StoreID{3}, + }, + { + threeInOneLocality, + []roachpb.StoreID{2, 3}, + []roachpb.StoreID{3}, + }, + { + threeInOneLocality, + []roachpb.StoreID{1, 2, 3}, + []roachpb.StoreID{3}, + }, + { + threeInOneLocality, + []roachpb.StoreID{3, 5, 7}, + []roachpb.StoreID{3, 5, 7}, + }, + { + threeInOneLocality, + []roachpb.StoreID{1, 2, 3, 5, 7}, + []roachpb.StoreID{3, 5, 7}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{1, 3}, + []roachpb.StoreID{1, 3}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{1, 2, 3}, + []roachpb.StoreID{1, 2}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{1, 3, 6}, + []roachpb.StoreID{1, 3, 6}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{1, 4, 6}, + []roachpb.StoreID{4, 6}, + }, + { + twoAndOneNodeAttrs, + []roachpb.StoreID{1, 2, 6}, + []roachpb.StoreID{2}, + }, + { + twoAndOneStoreAttrs, + []roachpb.StoreID{1, 2, 3}, + []roachpb.StoreID{1, 2}, + }, + { + twoAndOneStoreAttrs, + []roachpb.StoreID{1, 3, 6}, + []roachpb.StoreID{1, 3, 6}, + }, + { + twoAndOneStoreAttrs, + []roachpb.StoreID{1, 4, 6}, + []roachpb.StoreID{4, 6}, + }, + { + twoAndOneStoreAttrs, + []roachpb.StoreID{1, 2, 6}, + []roachpb.StoreID{2}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{1, 3, 6}, + []roachpb.StoreID{1, 3, 6}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{1, 2, 3}, + []roachpb.StoreID{1, 2}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{2, 3, 6}, + []roachpb.StoreID{2, 6}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{2, 3, 4}, + []roachpb.StoreID{4}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{5, 7}, + []roachpb.StoreID{5, 7}, + }, + { + // TODO(a-robinson): Should we prefer just 5 here for diversity reasons? + // We'd have to rework our handling of invalid stores in a handful of + // places, including in `candidateList.worst()`, to consider traits beyond + // just invalidity. 
+ mixLocalityAndAttrs, + []roachpb.StoreID{5, 6, 7}, + []roachpb.StoreID{5, 7}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{1, 5, 7}, + []roachpb.StoreID{5, 7}, + }, + { + mixLocalityAndAttrs, + []roachpb.StoreID{1, 6, 8}, + []roachpb.StoreID{6, 8}, + }, + } + + for testIdx, tc := range testCases { + sl, _, _ := a.storePool.getStoreListFromIDs(tc.existing, roachpb.RangeID(0), storeFilterNone) + existingRepls := make([]roachpb.ReplicaDescriptor, len(tc.existing)) + for i, storeID := range tc.existing { + existingRepls[i] = roachpb.ReplicaDescriptor{ + NodeID: roachpb.NodeID(storeID), + StoreID: storeID, + } + } + rangeInfo := testRangeInfo(existingRepls, firstRange) + analyzed := analyzeConstraints( + context.Background(), a.storePool.getStoreDescriptor, rangeInfo.Desc.Replicas, tc.constraints) + candidates := removeCandidates( + sl, + analyzed, + rangeInfo, + a.storePool.getLocalities(existingRepls), + a.scorerOptions(false), + ) + if !expectedStoreIDsMatch(tc.expected, candidates.worst()) { + t.Errorf("%d: expected removeCandidates(%v) = %v, but got %v", + testIdx, tc.existing, tc.expected, candidates) + } + } +} + +func expectedStoreIDsMatch(expected []roachpb.StoreID, results candidateList) bool { + if len(expected) != len(results) { + return false + } + sort.Slice(results, func(i, j int) bool { + return results[i].store.StoreID < results[j].store.StoreID + }) + for i := range expected { + if expected[i] != results[i].store.StoreID { + return false + } + } + return true +} + +func TestRebalanceCandidatesNumReplicasConstraints(t *testing.T) { + defer leaktest.AfterTest(t)() + + stopper, g, _, a, _ := createTestAllocator( /* deterministic */ false) + defer stopper.Stop(context.Background()) + sg := gossiputil.NewStoreGossiper(g) + sg.GossipStores(multiDiversityDCStores, t) + sl, _, _ := a.storePool.getStoreList(firstRange, storeFilterThrottled) + + // Given a set of existing replicas for a range, rank which of the remaining + // stores would be best to remove if we had to remove one purely on the basis + // of constraint-matching and locality diversity. 
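The expectations below pair each replica that is worth moving with the stores it may move to, and `validTargets` is the set that the final `RebalanceTarget` call is allowed to pick. A toy model of one case, grounded in the `threeSpecificLocalities` data above; the `rebalanceOption` type and the `main` function are illustrative only.

```go
package main

import "fmt"

// rebalanceOption is a toy model (not the allocator's data structures) pairing
// a replica that is worth moving with the stores it could move to.
type rebalanceOption struct {
	existing   int   // store that would give up its replica
	candidates []int // strictly better stores it could move to
}

func main() {
	// The threeSpecificLocalities case with replicas on stores 1, 2, and 7:
	// stores 1 and 2 both sit in datacenter "a" (so one of them is redundant)
	// and store 7 is in datacenter "d", which no constraint group asks for, so
	// each of them may move to a store in datacenter "b" or "c" (3, 4, 5, 6).
	options := []rebalanceOption{
		{existing: 1, candidates: []int{3, 4, 5, 6}},
		{existing: 2, candidates: []int{3, 4, 5, 6}},
		{existing: 7, candidates: []int{3, 4, 5, 6}},
	}
	for _, o := range options {
		fmt.Printf("replica on s%d could rebalance to stores %v\n", o.existing, o.candidates)
	}
	// For this case any of stores 3-6 is an acceptable final RebalanceTarget
	// choice, i.e. validTargets = {3, 4, 5, 6}.
}
```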
+ type rebalanceStoreIDs struct { + existing []roachpb.StoreID + candidates []roachpb.StoreID + } + testCases := []struct { + constraints []config.Constraints + existing []roachpb.StoreID + expected []rebalanceStoreIDs + validTargets []roachpb.StoreID + }{ + { + constraints: threeSpecificLocalities, + existing: []roachpb.StoreID{1}, + expected: []rebalanceStoreIDs{}, // a store must be an improvement to justify rebalancing + validTargets: []roachpb.StoreID{}, + }, + { + constraints: threeSpecificLocalities, + existing: []roachpb.StoreID{1, 3}, + expected: []rebalanceStoreIDs{}, + validTargets: []roachpb.StoreID{}, + }, + { + constraints: threeSpecificLocalities, + existing: []roachpb.StoreID{1, 3, 5}, + expected: []rebalanceStoreIDs{}, + validTargets: []roachpb.StoreID{}, + }, + { + constraints: threeSpecificLocalities, + existing: []roachpb.StoreID{1, 2}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{1}, + candidates: []roachpb.StoreID{3, 4, 5, 6}, + }, + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{3, 4, 5, 6}, + }, + }, + validTargets: []roachpb.StoreID{3, 4, 5, 6}, + }, + { + constraints: threeSpecificLocalities, + existing: []roachpb.StoreID{1, 2, 3}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{1}, + candidates: []roachpb.StoreID{5, 6}, + }, + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{5, 6}, + }, + }, + validTargets: []roachpb.StoreID{5, 6}, + }, + { + constraints: threeSpecificLocalities, + existing: []roachpb.StoreID{1, 3, 7}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{7}, + candidates: []roachpb.StoreID{5, 6}, + }, + }, + validTargets: []roachpb.StoreID{5, 6}, + }, + { + constraints: threeSpecificLocalities, + existing: []roachpb.StoreID{1, 2, 7}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{1}, + candidates: []roachpb.StoreID{3, 4, 5, 6}, + }, + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{3, 4, 5, 6}, + }, + { + existing: []roachpb.StoreID{7}, + candidates: []roachpb.StoreID{3, 4, 5, 6}, + }, + }, + validTargets: []roachpb.StoreID{3, 4, 5, 6}, + }, + { + constraints: threeSpecificLocalities, + existing: []roachpb.StoreID{1, 7, 8}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{7}, + candidates: []roachpb.StoreID{3, 4, 5, 6}, + }, + { + existing: []roachpb.StoreID{8}, + candidates: []roachpb.StoreID{3, 4, 5, 6}, + }, + }, + validTargets: []roachpb.StoreID{3, 4, 5, 6}, + }, + { + constraints: twoAndOneLocalities, + existing: []roachpb.StoreID{1, 2, 3}, + expected: []rebalanceStoreIDs{}, + validTargets: []roachpb.StoreID{}, + }, + { + constraints: twoAndOneLocalities, + existing: []roachpb.StoreID{2, 3, 4}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{3}, + candidates: []roachpb.StoreID{1}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{1}, + }, + }, + validTargets: []roachpb.StoreID{1}, + }, + { + constraints: twoAndOneLocalities, + existing: []roachpb.StoreID{1, 2, 5}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{1}, + candidates: []roachpb.StoreID{3, 4}, + }, + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{3, 4}, + }, + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{3, 4}, + }, + }, + validTargets: []roachpb.StoreID{3, 4}, + }, + { + constraints: twoAndOneLocalities, + existing: []roachpb.StoreID{1, 3, 5}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{5}, + 
candidates: []roachpb.StoreID{2}, + }, + }, + validTargets: []roachpb.StoreID{2}, + }, + { + constraints: twoAndOneLocalities, + existing: []roachpb.StoreID{1, 5, 6}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{3, 4}, + }, + { + existing: []roachpb.StoreID{6}, + candidates: []roachpb.StoreID{3, 4}, + }, + }, + validTargets: []roachpb.StoreID{3, 4}, + }, + { + constraints: twoAndOneLocalities, + existing: []roachpb.StoreID{3, 5, 6}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{1, 2}, + }, + { + existing: []roachpb.StoreID{6}, + candidates: []roachpb.StoreID{1, 2}, + }, + }, + validTargets: []roachpb.StoreID{1, 2}, + }, + { + constraints: twoAndOneLocalities, + existing: []roachpb.StoreID{1, 3, 4}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{3}, + candidates: []roachpb.StoreID{2}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{2}, + }, + }, + validTargets: []roachpb.StoreID{2}, + }, + { + constraints: threeInOneLocality, + existing: []roachpb.StoreID{1, 2, 3}, + expected: []rebalanceStoreIDs{}, + validTargets: []roachpb.StoreID{}, + }, + { + constraints: threeInOneLocality, + existing: []roachpb.StoreID{1, 3, 4}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{3}, + candidates: []roachpb.StoreID{2}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{2}, + }, + }, + validTargets: []roachpb.StoreID{2}, + }, + { + constraints: threeInOneLocality, + existing: []roachpb.StoreID{3, 4, 5}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{3}, + candidates: []roachpb.StoreID{1, 2}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{1, 2}, + }, + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{1, 2}, + }, + }, + validTargets: []roachpb.StoreID{1, 2}, + }, { - []roachpb.StoreID{1, 2, 3}, - []roachpb.StoreID{5, 6, 7, 8}, + constraints: twoAndOneNodeAttrs, + existing: []roachpb.StoreID{1, 4, 5}, + expected: []rebalanceStoreIDs{}, + validTargets: []roachpb.StoreID{}, }, { - []roachpb.StoreID{1, 3, 4}, - []roachpb.StoreID{5, 6, 7, 8}, + constraints: twoAndOneNodeAttrs, + existing: []roachpb.StoreID{3, 6, 7}, + expected: []rebalanceStoreIDs{}, + validTargets: []roachpb.StoreID{}, }, { - []roachpb.StoreID{3, 4, 5}, - []roachpb.StoreID{1, 2, 7, 8}, + constraints: twoAndOneNodeAttrs, + existing: []roachpb.StoreID{1, 2, 3}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{1}, + candidates: []roachpb.StoreID{5, 7}, + }, + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{6, 8}, + }, + }, + validTargets: []roachpb.StoreID{5, 6, 7, 8}, }, { - []roachpb.StoreID{1, 7, 8}, - []roachpb.StoreID{3, 4, 5, 6}, + constraints: twoAndOneNodeAttrs, + existing: []roachpb.StoreID{2, 3, 4}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{1, 5, 7}, + }, + { + existing: []roachpb.StoreID{3}, + candidates: []roachpb.StoreID{5, 7}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{5, 7}, + }, + }, + validTargets: []roachpb.StoreID{5, 7}, }, { - []roachpb.StoreID{5, 7, 8}, - []roachpb.StoreID{1, 2, 3, 4}, + constraints: twoAndOneNodeAttrs, + existing: []roachpb.StoreID{2, 4, 5}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{1, 7}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: 
[]roachpb.StoreID{3, 7}, + }, + }, + validTargets: []roachpb.StoreID{1, 3, 7}, }, { - []roachpb.StoreID{1, 3, 5}, - []roachpb.StoreID{7, 8}, + constraints: twoAndOneNodeAttrs, + existing: []roachpb.StoreID{1, 3, 5}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{1}, + candidates: []roachpb.StoreID{2, 8}, + }, + { + existing: []roachpb.StoreID{3}, + candidates: []roachpb.StoreID{4, 8}, + }, + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{6, 8}, + }, + }, + validTargets: []roachpb.StoreID{2, 4, 6, 8}, }, { - []roachpb.StoreID{1, 3, 7}, - []roachpb.StoreID{5, 6}, + constraints: twoAndOneNodeAttrs, + existing: []roachpb.StoreID{2, 4, 6}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{1, 7}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{3, 7}, + }, + { + existing: []roachpb.StoreID{6}, + candidates: []roachpb.StoreID{5, 7}, + }, + }, + validTargets: []roachpb.StoreID{1, 3, 5, 7}, }, { - []roachpb.StoreID{1, 5, 7}, - []roachpb.StoreID{3, 4}, + constraints: twoAndOneStoreAttrs, + existing: []roachpb.StoreID{1, 4, 5}, + expected: []rebalanceStoreIDs{}, + validTargets: []roachpb.StoreID{}, }, { - []roachpb.StoreID{3, 5, 7}, - []roachpb.StoreID{1, 2}, + constraints: twoAndOneStoreAttrs, + existing: []roachpb.StoreID{3, 6, 7}, + expected: []rebalanceStoreIDs{}, + validTargets: []roachpb.StoreID{}, }, - } - - for _, c := range testCases { - existingRepls := make([]roachpb.ReplicaDescriptor, len(c.existing)) - for i, storeID := range c.existing { - existingRepls[i] = roachpb.ReplicaDescriptor{ - NodeID: roachpb.NodeID(storeID), - StoreID: storeID, - } - } - targetStore, details, err := a.AllocateTarget( - context.Background(), - config.Constraints{}, - existingRepls, - testRangeInfo(existingRepls, firstRange), - false, - ) - if err != nil { - t.Fatal(err) - } - var found bool - for _, storeID := range c.expected { - if targetStore.StoreID == storeID { - found = true - break - } - } - if !found { - t.Errorf("expected AllocateTarget(%v) in %v, but got %d; details: %s", c.existing, c.expected, targetStore.StoreID, details) - } - } -} - -func TestAllocatorRebalanceTargetLocality(t *testing.T) { - defer leaktest.AfterTest(t)() - - stopper, g, _, a, _ := createTestAllocator( /* deterministic */ false) - defer stopper.Stop(context.Background()) - - stores := []*roachpb.StoreDescriptor{ { - StoreID: 1, - Node: multiDiversityDCStores[0].Node, - Capacity: roachpb.StoreCapacity{RangeCount: 10}, + constraints: twoAndOneStoreAttrs, + existing: []roachpb.StoreID{1, 2, 3}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{1}, + candidates: []roachpb.StoreID{5, 7}, + }, + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{6, 8}, + }, + }, + validTargets: []roachpb.StoreID{5, 6, 7, 8}, }, { - StoreID: 2, - Node: multiDiversityDCStores[1].Node, - Capacity: roachpb.StoreCapacity{RangeCount: 20}, + constraints: twoAndOneStoreAttrs, + existing: []roachpb.StoreID{2, 3, 4}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{1, 5, 7}, + }, + { + existing: []roachpb.StoreID{3}, + candidates: []roachpb.StoreID{5, 7}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{5, 7}, + }, + }, + validTargets: []roachpb.StoreID{5, 7}, }, { - StoreID: 3, - Node: multiDiversityDCStores[2].Node, - Capacity: roachpb.StoreCapacity{RangeCount: 10}, + constraints: twoAndOneStoreAttrs, + existing: 
[]roachpb.StoreID{2, 4, 5}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{1, 7}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{3, 7}, + }, + }, + validTargets: []roachpb.StoreID{1, 3, 7}, }, { - StoreID: 4, - Node: multiDiversityDCStores[3].Node, - Capacity: roachpb.StoreCapacity{RangeCount: 20}, + constraints: twoAndOneStoreAttrs, + existing: []roachpb.StoreID{1, 3, 5}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{1}, + candidates: []roachpb.StoreID{2, 8}, + }, + { + existing: []roachpb.StoreID{3}, + candidates: []roachpb.StoreID{4, 8}, + }, + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{6, 8}, + }, + }, + validTargets: []roachpb.StoreID{2, 4, 6, 8}, }, { - StoreID: 5, - Node: multiDiversityDCStores[4].Node, - Capacity: roachpb.StoreCapacity{RangeCount: 10}, + constraints: twoAndOneStoreAttrs, + existing: []roachpb.StoreID{2, 4, 6}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{1, 7}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{3, 7}, + }, + { + existing: []roachpb.StoreID{6}, + candidates: []roachpb.StoreID{5, 7}, + }, + }, + validTargets: []roachpb.StoreID{1, 3, 5, 7}, }, { - StoreID: 6, - Node: multiDiversityDCStores[5].Node, - Capacity: roachpb.StoreCapacity{RangeCount: 20}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{1, 3, 6}, + expected: []rebalanceStoreIDs{}, + validTargets: []roachpb.StoreID{}, }, - } - sg := gossiputil.NewStoreGossiper(g) - sg.GossipStores(stores, t) - - testCases := []struct { - existing []roachpb.StoreID - expected []roachpb.StoreID - }{ { - []roachpb.StoreID{1, 2, 3}, - []roachpb.StoreID{5}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{1, 3, 8}, + expected: []rebalanceStoreIDs{}, + validTargets: []roachpb.StoreID{}, }, { - []roachpb.StoreID{1, 3, 4}, - []roachpb.StoreID{5}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{1, 5, 8}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{3}, + }, + }, + validTargets: []roachpb.StoreID{3}, }, { - []roachpb.StoreID{1, 3, 6}, - []roachpb.StoreID{5}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{1, 5, 6}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{3}, + }, + { + existing: []roachpb.StoreID{6}, + candidates: []roachpb.StoreID{3, 4, 8}, + }, + }, + validTargets: []roachpb.StoreID{3}, }, { - []roachpb.StoreID{1, 2, 5}, - []roachpb.StoreID{3}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{1, 3, 5}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{6, 8}, + }, + }, + validTargets: []roachpb.StoreID{6, 8}, }, { - []roachpb.StoreID{1, 2, 6}, - []roachpb.StoreID{3}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{1, 2, 3}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{6, 8}, + }, + }, + validTargets: []roachpb.StoreID{6, 8}, }, { - []roachpb.StoreID{1, 4, 5}, - []roachpb.StoreID{3}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{1, 3, 4}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{6, 8}, + }, + }, + validTargets: []roachpb.StoreID{6, 8}, }, { - []roachpb.StoreID{1, 4, 6}, - []roachpb.StoreID{3, 5}, + 
constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{2, 3, 4}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{2}, + candidates: []roachpb.StoreID{1}, + }, + { + existing: []roachpb.StoreID{4}, + candidates: []roachpb.StoreID{1}, + }, + }, + validTargets: []roachpb.StoreID{1}, }, { - []roachpb.StoreID{3, 4, 5}, - []roachpb.StoreID{1}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{5, 6, 7}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{1, 3}, + }, + { + existing: []roachpb.StoreID{6}, + candidates: []roachpb.StoreID{1, 2, 3, 4}, + }, + { + existing: []roachpb.StoreID{7}, + candidates: []roachpb.StoreID{1, 3}, + }, + }, + validTargets: []roachpb.StoreID{1, 3}, }, { - []roachpb.StoreID{3, 4, 6}, - []roachpb.StoreID{1}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{6, 7, 8}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{6}, + candidates: []roachpb.StoreID{1, 3}, + }, + { + existing: []roachpb.StoreID{7}, + candidates: []roachpb.StoreID{1, 3}, + }, + { + existing: []roachpb.StoreID{8}, + candidates: []roachpb.StoreID{1, 3}, + }, + }, + validTargets: []roachpb.StoreID{1, 3}, }, { - []roachpb.StoreID{4, 5, 6}, - []roachpb.StoreID{1}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{1, 6, 8}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{6}, + candidates: []roachpb.StoreID{3}, + }, + { + existing: []roachpb.StoreID{8}, + candidates: []roachpb.StoreID{3}, + }, + }, + validTargets: []roachpb.StoreID{3}, }, { - []roachpb.StoreID{2, 4, 6}, - []roachpb.StoreID{1, 3, 5}, + constraints: mixLocalityAndAttrs, + existing: []roachpb.StoreID{1, 5, 7}, + expected: []rebalanceStoreIDs{ + { + existing: []roachpb.StoreID{5}, + candidates: []roachpb.StoreID{3, 4, 6}, + }, + { + existing: []roachpb.StoreID{7}, + candidates: []roachpb.StoreID{3, 4, 8}, + }, + }, + validTargets: []roachpb.StoreID{3, 4, 6, 8}, }, } - for _, c := range testCases { - existingRepls := make([]roachpb.ReplicaDescriptor, len(c.existing)) - for i, storeID := range c.existing { + for testIdx, tc := range testCases { + existingRepls := make([]roachpb.ReplicaDescriptor, len(tc.existing)) + for i, storeID := range tc.existing { existingRepls[i] = roachpb.ReplicaDescriptor{ NodeID: roachpb.NodeID(storeID), StoreID: storeID, } } - targetStore, details := a.RebalanceTarget( + rangeInfo := testRangeInfo(existingRepls, firstRange) + analyzed := analyzeConstraints( + context.Background(), a.storePool.getStoreDescriptor, rangeInfo.Desc.Replicas, tc.constraints) + results := rebalanceCandidates( context.Background(), - config.Constraints{}, - nil, - testRangeInfo(existingRepls, firstRange), - storeFilterThrottled, - false, + sl, + analyzed, + rangeInfo, + a.storePool.getLocalities(existingRepls), + a.storePool.getNodeLocalityString, + a.scorerOptions(false), ) - if targetStore == nil { - t.Fatalf("RebalanceTarget(%v) returned no target store; details: %s", c.existing, details) + match := true + if len(tc.expected) != len(results) { + match = false + } else { + sort.Slice(results, func(i, j int) bool { + return results[i].existingCandidates[0].store.StoreID < results[j].existingCandidates[0].store.StoreID + }) + for i := range tc.expected { + if !expectedStoreIDsMatch(tc.expected[i].existing, results[i].existingCandidates) || + !expectedStoreIDsMatch(tc.expected[i].candidates, results[i].candidates) { + match = false + break + } + } } - var found bool - for _, storeID := 
range c.expected { - if targetStore.StoreID == storeID { + if !match { + t.Errorf("%d: expected rebalanceCandidates(%v) = %v, but got %v", + testIdx, tc.existing, tc.expected, results) + } else { + // Also verify that RebalanceTarget picks out one of the best options as + // the final rebalance choice. + target, details := a.RebalanceTarget( + context.Background(), + tc.constraints, + nil, + rangeInfo, + storeFilterThrottled, + false) + var found bool + if target == nil && len(tc.validTargets) == 0 { found = true - break } - } - if !found { - t.Errorf("expected RebalanceTarget(%v) in %v, but got %d; details: %s", c.existing, c.expected, targetStore.StoreID, details) + for _, storeID := range tc.validTargets { + if storeID == target.StoreID { + found = true + break + } + } + if !found { + t.Errorf("%d: expected RebalanceTarget(%v) to be in %v, but got %v; details: %s", + testIdx, tc.existing, tc.validTargets, target, details) + } } } } @@ -1684,7 +2942,7 @@ func TestAllocatorTransferLeaseTargetLoadBased(t *testing.T) { }) target := a.TransferLeaseTarget( context.Background(), - config.Constraints{}, + nil, /* constraints */ existing, c.leaseholder, 0, @@ -1885,7 +3143,7 @@ func TestAllocatorRemoveTarget(t *testing.T) { for i := 0; i < 10; i++ { targetRepl, _, err := a.RemoveTarget( ctx, - config.Constraints{}, + nil, /* constraints */ replicas, testRangeInfo(replicas, firstRange), false, @@ -1914,7 +3172,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 3, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -1938,7 +3196,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 5, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -1972,7 +3230,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 5, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -2006,7 +3264,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 3, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -2035,7 +3293,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 3, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -2064,7 +3322,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 5, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: 
[]config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -2103,7 +3361,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 3, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -2137,7 +3395,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 3, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -2178,7 +3436,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 3, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -2207,7 +3465,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 3, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -2236,7 +3494,7 @@ func TestAllocatorComputeAction(t *testing.T) { { zone: config.ZoneConfig{ NumReplicas: 3, - Constraints: config.Constraints{Constraints: []config.Constraint{{Value: "us-east"}}}, + Constraints: []config.Constraints{{Constraints: []config.Constraint{{Value: "us-east", Type: config.Constraint_DEPRECATED_POSITIVE}}}}, RangeMinBytes: 0, RangeMaxBytes: 64000, }, @@ -2374,7 +3632,7 @@ func TestAllocatorRebalanceTargetDisableStatsRebalance(t *testing.T) { for i := 0; i < 50; i++ { target, _ := a.RebalanceTarget( context.Background(), - config.Constraints{}, + nil, /* constraints */ nil, testRangeInfo(desc.Replicas, desc.RangeID), storeFilterThrottled, @@ -2388,7 +3646,7 @@ func TestAllocatorRebalanceTargetDisableStatsRebalance(t *testing.T) { for i := 0; i < 50; i++ { target, _ := a.RebalanceTarget( context.Background(), - config.Constraints{}, + nil, /* constraints */ nil, testRangeInfo(desc.Replicas, desc.RangeID), storeFilterThrottled, @@ -2726,8 +3984,17 @@ func TestAllocatorComputeActionNoStorePool(t *testing.T) { func TestAllocatorError(t *testing.T) { defer leaktest.AfterTest(t)() - constraint := []config.Constraint{{Value: "one"}} - constraints := []config.Constraint{{Value: "one"}, {Value: "two"}} + constraint := []config.Constraints{ + {Constraints: []config.Constraint{{Value: "one", Type: config.Constraint_REQUIRED}}}, + } + constraints := []config.Constraints{ + { + Constraints: []config.Constraint{ + {Value: "one", Type: config.Constraint_REQUIRED}, + {Value: "two", Type: config.Constraint_REQUIRED}, + }, + }, + } testCases := []struct { ae allocatorError @@ -2736,13 +4003,13 @@ func TestAllocatorError(t *testing.T) { {allocatorError{nil, 1}, "0 of 1 store with attributes matching []; likely not enough nodes in cluster"}, {allocatorError{constraint, 1}, - "0 of 1 store with attributes matching 
[one]"}, + "0 of 1 store with attributes matching [{0 [+one]}]"}, {allocatorError{constraint, 2}, - "0 of 2 stores with attributes matching [one]"}, + "0 of 2 stores with attributes matching [{0 [+one]}]"}, {allocatorError{constraints, 1}, - "0 of 1 store with attributes matching [one two]"}, + "0 of 1 store with attributes matching [{0 [+one +two]}]"}, {allocatorError{constraints, 2}, - "0 of 2 stores with attributes matching [one two]"}, + "0 of 2 stores with attributes matching [{0 [+one +two]}]"}, } for i, testCase := range testCases { @@ -3038,18 +4305,17 @@ func TestAllocatorRebalanceAway(t *testing.T) { expected: nil, }, { - constraint: config.Constraint{Key: "datacenter", Value: "other", Type: config.Constraint_POSITIVE}, + constraint: config.Constraint{Key: "datacenter", Value: "other", Type: config.Constraint_DEPRECATED_POSITIVE}, + expected: nil, + }, + { + constraint: config.Constraint{Key: "datacenter", Value: "us", Type: config.Constraint_DEPRECATED_POSITIVE}, + expected: nil, + }, + { + constraint: config.Constraint{Key: "datacenter", Value: "eur", Type: config.Constraint_DEPRECATED_POSITIVE}, expected: nil, }, - // TODO(bram): re-enable these test cases once #14163 is resolved. - // { - // constraint: config.Constraint{Key: "datacenter", Value: "us", Type: config.Constraint_POSITIVE}, - // expected: &stores[3].StoreID, - // }, - // { - // constraint: config.Constraint{Key: "datacenter", Value: "eur", Type: config.Constraint_POSITIVE}, - // expected: &stores[4].StoreID, - // }, } stopper, g, _, a, _ := createTestAllocator( /* deterministic */ false) @@ -3067,7 +4333,7 @@ func TestAllocatorRebalanceAway(t *testing.T) { actual, _ := a.RebalanceTarget( ctx, - constraints, + []config.Constraints{constraints}, nil, testRangeInfo(existingReplicas, firstRange), storeFilterThrottled, @@ -3226,7 +4492,7 @@ func TestAllocatorFullDisks(t *testing.T) { if ts.Capacity.RangeCount > 0 { target, details := alloc.RebalanceTarget( ctx, - config.Constraints{}, + nil, /* constraints */ nil, testRangeInfo([]roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}}, firstRange), storeFilterThrottled, @@ -3346,7 +4612,7 @@ func Example_rebalancing() { ts := &testStores[j] target, details := alloc.RebalanceTarget( context.Background(), - config.Constraints{}, + nil, /* constraints */ nil, testRangeInfo([]roachpb.ReplicaDescriptor{{NodeID: ts.Node.NodeID, StoreID: ts.StoreID}}, firstRange), storeFilterThrottled, diff --git a/pkg/storage/store_pool.go b/pkg/storage/store_pool.go index da00c29a644a..230e90468b2f 100644 --- a/pkg/storage/store_pool.go +++ b/pkg/storage/store_pool.go @@ -510,13 +510,13 @@ func (sl StoreList) String() string { // filter takes a store list and filters it using the passed in constraints. It // maintains the original order of the passed in store list. 
-func (sl StoreList) filter(constraints config.Constraints) StoreList { - if len(constraints.Constraints) == 0 { +func (sl StoreList) filter(constraints []config.Constraints) StoreList { + if len(constraints) == 0 { return sl } var filteredDescs []roachpb.StoreDescriptor for _, store := range sl.stores { - if ok, _ := constraintCheck(store, constraints); ok { + if ok := constraintsCheck(store, constraints); ok { filteredDescs = append(filteredDescs, store) } } diff --git a/pkg/storage/store_pool_test.go b/pkg/storage/store_pool_test.go index b073f27a08c3..66f418916720 100644 --- a/pkg/storage/store_pool_test.go +++ b/pkg/storage/store_pool_test.go @@ -143,7 +143,7 @@ func TestStorePoolGossipUpdate(t *testing.T) { // verifyStoreList ensures that the returned list of stores is correct. func verifyStoreList( sp *StorePool, - constraints config.Constraints, + constraints []config.Constraints, rangeID roachpb.RangeID, expected []int, filter storeFilter, @@ -181,10 +181,12 @@ func TestStorePoolGetStoreList(t *testing.T) { TestTimeUntilStoreDead, false /* deterministic */, NodeLivenessStatus_DEAD) defer stopper.Stop(context.TODO()) sg := gossiputil.NewStoreGossiper(g) - constraints := config.Constraints{ - Constraints: []config.Constraint{ - {Type: config.Constraint_REQUIRED, Value: "ssd"}, - {Type: config.Constraint_REQUIRED, Value: "dc"}, + constraints := []config.Constraints{ + { + Constraints: []config.Constraint{ + {Type: config.Constraint_REQUIRED, Value: "ssd"}, + {Type: config.Constraint_REQUIRED, Value: "dc"}, + }, }, } required := []string{"ssd", "dc"} @@ -334,12 +336,14 @@ func TestStorePoolGetStoreList(t *testing.T) { func TestStoreListFilter(t *testing.T) { defer leaktest.AfterTest(t)() - constraints := config.Constraints{ - Constraints: []config.Constraint{ - {Type: config.Constraint_REQUIRED, Key: "region", Value: "us-west"}, - {Type: config.Constraint_REQUIRED, Value: "MustMatch"}, - {Type: config.Constraint_POSITIVE, Value: "MatchingOptional"}, - {Type: config.Constraint_PROHIBITED, Value: "MustNotMatch"}, + constraints := []config.Constraints{ + { + Constraints: []config.Constraint{ + {Type: config.Constraint_REQUIRED, Key: "region", Value: "us-west"}, + {Type: config.Constraint_REQUIRED, Value: "MustMatch"}, + {Type: config.Constraint_DEPRECATED_POSITIVE, Value: "MatchingOptional"}, + {Type: config.Constraint_PROHIBITED, Value: "MustNotMatch"}, + }, }, }