diff --git a/internal/cortex/ingester/client/compat.go b/internal/cortex/ingester/client/compat.go deleted file mode 100644 index afaf71134b..0000000000 --- a/internal/cortex/ingester/client/compat.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package client - -import ( - "fmt" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/thanos-io/thanos/internal/cortex/cortexpb" -) - -// ToQueryRequest builds a QueryRequest proto. -func ToQueryRequest(from, to model.Time, matchers []*labels.Matcher) (*QueryRequest, error) { - ms, err := toLabelMatchers(matchers) - if err != nil { - return nil, err - } - - return &QueryRequest{ - StartTimestampMs: int64(from), - EndTimestampMs: int64(to), - Matchers: ms, - }, nil -} - -// FromQueryRequest unpacks a QueryRequest proto. -func FromQueryRequest(req *QueryRequest) (model.Time, model.Time, []*labels.Matcher, error) { - matchers, err := FromLabelMatchers(req.Matchers) - if err != nil { - return 0, 0, nil, err - } - from := model.Time(req.StartTimestampMs) - to := model.Time(req.EndTimestampMs) - return from, to, matchers, nil -} - -// ToQueryResponse builds a QueryResponse proto. -func ToQueryResponse(matrix model.Matrix) *QueryResponse { - resp := &QueryResponse{} - for _, ss := range matrix { - ts := cortexpb.TimeSeries{ - Labels: cortexpb.FromMetricsToLabelAdapters(ss.Metric), - Samples: make([]cortexpb.Sample, 0, len(ss.Values)), - } - for _, s := range ss.Values { - ts.Samples = append(ts.Samples, cortexpb.Sample{ - Value: float64(s.Value), - TimestampMs: int64(s.Timestamp), - }) - } - resp.Timeseries = append(resp.Timeseries, ts) - } - return resp -} - -// FromQueryResponse unpacks a QueryResponse proto. 
-func FromQueryResponse(resp *QueryResponse) model.Matrix { - m := make(model.Matrix, 0, len(resp.Timeseries)) - for _, ts := range resp.Timeseries { - var ss model.SampleStream - ss.Metric = cortexpb.FromLabelAdaptersToMetric(ts.Labels) - ss.Values = make([]model.SamplePair, 0, len(ts.Samples)) - for _, s := range ts.Samples { - ss.Values = append(ss.Values, model.SamplePair{ - Value: model.SampleValue(s.Value), - Timestamp: model.Time(s.TimestampMs), - }) - } - m = append(m, &ss) - } - - return m -} - -func toLabelMatchers(matchers []*labels.Matcher) ([]*LabelMatcher, error) { - result := make([]*LabelMatcher, 0, len(matchers)) - for _, matcher := range matchers { - var mType MatchType - switch matcher.Type { - case labels.MatchEqual: - mType = EQUAL - case labels.MatchNotEqual: - mType = NOT_EQUAL - case labels.MatchRegexp: - mType = REGEX_MATCH - case labels.MatchNotRegexp: - mType = REGEX_NO_MATCH - default: - return nil, fmt.Errorf("invalid matcher type") - } - result = append(result, &LabelMatcher{ - Type: mType, - Name: matcher.Name, - Value: matcher.Value, - }) - } - return result, nil -} - -func FromLabelMatchers(matchers []*LabelMatcher) ([]*labels.Matcher, error) { - result := make([]*labels.Matcher, 0, len(matchers)) - for _, matcher := range matchers { - var mtype labels.MatchType - switch matcher.Type { - case EQUAL: - mtype = labels.MatchEqual - case NOT_EQUAL: - mtype = labels.MatchNotEqual - case REGEX_MATCH: - mtype = labels.MatchRegexp - case REGEX_NO_MATCH: - mtype = labels.MatchNotRegexp - default: - return nil, fmt.Errorf("invalid matcher type") - } - matcher, err := labels.NewMatcher(mtype, matcher.Name, matcher.Value) - if err != nil { - return nil, err - } - result = append(result, matcher) - } - return result, nil -} - -// Fingerprint runs the same algorithm as Prometheus labelSetToFingerprint() -func Fingerprint(labels labels.Labels) model.Fingerprint { - sum := hashNew() - for _, label := range labels { - sum = hashAddString(sum, label.Name) - sum = hashAddByte(sum, model.SeparatorByte) - sum = hashAddString(sum, label.Value) - sum = hashAddByte(sum, model.SeparatorByte) - } - return model.Fingerprint(sum) -} - -// LabelsToKeyString is used to form a string to be used as -// the hashKey. Don't print, use l.String() for printing. -func LabelsToKeyString(l labels.Labels) string { - // We are allocating 1024, even though most series are less than 600b long. - // But this is not an issue as this function is being inlined when called in a loop - // and buffer allocated is a static buffer and not a dynamic buffer on the heap. - b := make([]byte, 0, 1024) - return string(l.Bytes(b)) -} diff --git a/internal/cortex/ingester/client/compat_test.go b/internal/cortex/ingester/client/compat_test.go deleted file mode 100644 index f6b3a6eb9a..0000000000 --- a/internal/cortex/ingester/client/compat_test.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
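A minimal sketch of how the removed compat helpers were typically used, mirroring the round trip that TestQueryRequest below verifies. It assumes the file's import path inside the Thanos module (github.com/thanos-io/thanos/internal/cortex/ingester/client, an internal package, so only importable from within that module) and illustrative variable names; it is not part of the original change.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"

	"github.com/thanos-io/thanos/internal/cortex/ingester/client"
)

func main() {
	// A Prometheus matcher such as {job=~"api.*"}...
	m, err := labels.NewMatcher(labels.MatchRegexp, "job", "api.*")
	if err != nil {
		panic(err)
	}

	// ...becomes a client.LabelMatcher with Type REGEX_MATCH inside a QueryRequest proto.
	req, err := client.ToQueryRequest(model.Time(0), model.Time(10), []*labels.Matcher{m})
	if err != nil {
		panic(err)
	}

	// FromQueryRequest reverses the conversion; from, to and the matchers survive the round trip.
	from, to, matchers, err := client.FromQueryRequest(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(from, to, matchers[0])
}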
- -package client - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "testing" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" -) - -func TestQueryRequest(t *testing.T) { - from, to := model.Time(int64(0)), model.Time(int64(10)) - matchers := []*labels.Matcher{} - matcher1, err := labels.NewMatcher(labels.MatchEqual, "foo", "1") - if err != nil { - t.Fatal(err) - } - matchers = append(matchers, matcher1) - - matcher2, err := labels.NewMatcher(labels.MatchNotEqual, "bar", "2") - if err != nil { - t.Fatal(err) - } - matchers = append(matchers, matcher2) - - matcher3, err := labels.NewMatcher(labels.MatchRegexp, "baz", "3") - if err != nil { - t.Fatal(err) - } - matchers = append(matchers, matcher3) - - matcher4, err := labels.NewMatcher(labels.MatchNotRegexp, "bop", "4") - if err != nil { - t.Fatal(err) - } - matchers = append(matchers, matcher4) - - req, err := ToQueryRequest(from, to, matchers) - if err != nil { - t.Fatal(err) - } - - haveFrom, haveTo, haveMatchers, err := FromQueryRequest(req) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(haveFrom, from) { - t.Fatalf("Bad from FromQueryRequest(ToQueryRequest) round trip") - } - if !reflect.DeepEqual(haveTo, to) { - t.Fatalf("Bad to FromQueryRequest(ToQueryRequest) round trip") - } - if !reflect.DeepEqual(haveMatchers, matchers) { - t.Fatalf("Bad have FromQueryRequest(ToQueryRequest) round trip - %v != %v", haveMatchers, matchers) - } -} - -func buildTestMatrix(numSeries int, samplesPerSeries int, offset int) model.Matrix { - m := make(model.Matrix, 0, numSeries) - for i := 0; i < numSeries; i++ { - ss := model.SampleStream{ - Metric: model.Metric{ - model.MetricNameLabel: model.LabelValue(fmt.Sprintf("testmetric_%d", i)), - model.JobLabel: "testjob", - }, - Values: make([]model.SamplePair, 0, samplesPerSeries), - } - for j := 0; j < samplesPerSeries; j++ { - ss.Values = append(ss.Values, model.SamplePair{ - Timestamp: model.Time(i + j + offset), - Value: model.SampleValue(i + j + offset), - }) - } - m = append(m, &ss) - } - sort.Sort(m) - return m -} - -func TestQueryResponse(t *testing.T) { - want := buildTestMatrix(10, 10, 10) - have := FromQueryResponse(ToQueryResponse(want)) - if !reflect.DeepEqual(have, want) { - t.Fatalf("Bad FromQueryResponse(ToQueryResponse) round trip") - } -} - -// This test shows label sets with same fingerprints, and also shows how to easily create new collisions -// (by adding "_" or "A" label with specific values, see below). -func TestFingerprintCollisions(t *testing.T) { - // "8yn0iYCKYHlIj4-BwPqk" and "GReLUrM4wMqfg9yzV3KQ" have same FNV-1a hash. - // If we use it as a single label name (for labels that have same value), we get colliding labels. - c1 := labels.FromStrings("8yn0iYCKYHlIj4-BwPqk", "hello") - c2 := labels.FromStrings("GReLUrM4wMqfg9yzV3KQ", "hello") - verifyCollision(t, true, c1, c2) - - // Adding _="ypfajYg2lsv" or _="KiqbryhzUpn" respectively to most metrics will produce collision. 
- // It's because "_\xffypfajYg2lsv" and "_\xffKiqbryhzUpn" have same FNV-1a hash, and "_" label is sorted before - // most other labels (except labels starting with upper-case letter) - - const _label1 = "ypfajYg2lsv" - const _label2 = "KiqbryhzUpn" - - metric := labels.NewBuilder(labels.FromStrings("__name__", "logs")) - c1 = metric.Set("_", _label1).Labels(nil) - c2 = metric.Set("_", _label2).Labels(nil) - verifyCollision(t, true, c1, c2) - - metric = labels.NewBuilder(labels.FromStrings("__name__", "up", "instance", "hello")) - c1 = metric.Set("_", _label1).Labels(nil) - c2 = metric.Set("_", _label2).Labels(nil) - verifyCollision(t, true, c1, c2) - - // here it breaks, because "Z" label is sorted before "_" label. - metric = labels.NewBuilder(labels.FromStrings("__name__", "up", "Z", "hello")) - c1 = metric.Set("_", _label1).Labels(nil) - c2 = metric.Set("_", _label2).Labels(nil) - verifyCollision(t, false, c1, c2) - - // A="K6sjsNNczPl" and A="cswpLMIZpwt" label has similar property. - // (Again, because "A\xffK6sjsNNczPl" and "A\xffcswpLMIZpwt" have same FNV-1a hash) - // This time, "A" is the smallest possible label name, and is always sorted first. - - const Alabel1 = "K6sjsNNczPl" - const Alabel2 = "cswpLMIZpwt" - - metric = labels.NewBuilder(labels.FromStrings("__name__", "up", "Z", "hello")) - c1 = metric.Set("A", Alabel1).Labels(nil) - c2 = metric.Set("A", Alabel2).Labels(nil) - verifyCollision(t, true, c1, c2) - - // Adding the same suffix to the "A" label also works. - metric = labels.NewBuilder(labels.FromStrings("__name__", "up", "Z", "hello")) - c1 = metric.Set("A", Alabel1+"suffix").Labels(nil) - c2 = metric.Set("A", Alabel2+"suffix").Labels(nil) - verifyCollision(t, true, c1, c2) -} - -func verifyCollision(t *testing.T, collision bool, ls1 labels.Labels, ls2 labels.Labels) { - if collision && Fingerprint(ls1) != Fingerprint(ls2) { - t.Errorf("expected same fingerprints for %v (%016x) and %v (%016x)", ls1.String(), Fingerprint(ls1), ls2.String(), Fingerprint(ls2)) - } else if !collision && Fingerprint(ls1) == Fingerprint(ls2) { - t.Errorf("expected different fingerprints for %v (%016x) and %v (%016x)", ls1.String(), Fingerprint(ls1), ls2.String(), Fingerprint(ls2)) - } -} - -// The main usecase for `LabelsToKeyString` is to generate hashKeys -// for maps. We are benchmarking that here. 
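A compact sketch of the collision the surrounding test relies on, using the Fingerprint helper deleted from compat.go. It assumes the same package and imports as this test file (package client, with fmt and labels imported) and the older slice-based labels API used here; the label names are the colliding FNV-1a pair quoted in the test.

// Fingerprint hashes name, 0xff, value, 0xff for each label in order, and the
// two names below have the same FNV-1a hash, so both label sets collide.
func ExampleFingerprint_collision() {
	c1 := labels.FromStrings("8yn0iYCKYHlIj4-BwPqk", "hello")
	c2 := labels.FromStrings("GReLUrM4wMqfg9yzV3KQ", "hello")
	fmt.Println(Fingerprint(c1) == Fingerprint(c2))
	// Output: true
}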
-func BenchmarkSeriesMap(b *testing.B) { - benchmarkSeriesMap(100000, b) -} - -func benchmarkSeriesMap(numSeries int, b *testing.B) { - series := makeSeries(numSeries) - sm := make(map[string]int, numSeries) - - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - for i, s := range series { - sm[LabelsToKeyString(s)] = i - } - - for _, s := range series { - _, ok := sm[LabelsToKeyString(s)] - if !ok { - b.Fatal("element missing") - } - } - - if len(sm) != numSeries { - b.Fatal("the number of series expected:", numSeries, "got:", len(sm)) - } - } -} - -func makeSeries(n int) []labels.Labels { - series := make([]labels.Labels, 0, n) - for i := 0; i < n; i++ { - series = append(series, labels.FromMap(map[string]string{ - "label0": "value0", - "label1": "value1", - "label2": "value2", - "label3": "value3", - "label4": "value4", - "label5": "value5", - "label6": "value6", - "label7": "value7", - "label8": "value8", - "label9": strconv.Itoa(i), - })) - } - - return series -} diff --git a/internal/cortex/ingester/client/fnv.go b/internal/cortex/ingester/client/fnv.go deleted file mode 100644 index 76c46d3904..0000000000 --- a/internal/cortex/ingester/client/fnv.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -// Modified from github.com/prometheus/common/model/fnv.go -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 - offset32 = 2166136261 - prime32 = 16777619 -) - -// hashNew initializes a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -// Note this is the same algorithm as Go stdlib `sum64a.Write()` -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAddString(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} - -// HashNew32 initializies a new fnv32 hash value. -func HashNew32() uint32 { - return offset32 -} - -// HashAdd32 adds a string to a fnv32 hash value, returning the updated hash. -// Note this is the same algorithm as Go stdlib `sum32.Write()` -func HashAdd32(h uint32, s string) uint32 { - for i := 0; i < len(s); i++ { - h *= prime32 - h ^= uint32(s[i]) - } - return h -} - -// HashAddByte32 adds a byte to a fnv32 hash value, returning the updated hash. -func HashAddByte32(h uint32, b byte) uint32 { - h *= prime32 - h ^= uint32(b) - return h -} - -// HashNew32a initializies a new fnv32a hash value. 
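A minimal parity check for the claim in the comments above that these inline, byte-free helpers implement the same algorithm as the standard library's FNV-1a. It assumes it sits in the same package as hashNew/hashAddString with an added "hash/fnv" import; the function name is illustrative only.

// fnvParityCheck hashes the same bytes through the inline helper defined above
// and through hash/fnv, and reports whether the two 64-bit FNV-1a values match.
func fnvParityCheck() bool {
	const s = "__name__\xffup\xff"

	// Inline, allocation-free variant.
	inline := hashAddString(hashNew(), s)

	// Standard library FNV-1a over the same bytes.
	std := fnv.New64a()
	std.Write([]byte(s))

	return inline == std.Sum64() // true: both implement 64-bit FNV-1a
}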
-func HashNew32a() uint32 { - return offset32 -} - -// HashAdd32a adds a string to a fnv32a hash value, returning the updated hash. -// Note this is the same algorithm as Go stdlib `sum32.Write()` -func HashAdd32a(h uint32, s string) uint32 { - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= prime32 - } - return h -} - -// HashAddByte32a adds a byte to a fnv32a hash value, returning the updated hash. -func HashAddByte32a(h uint32, b byte) uint32 { - h ^= uint32(b) - h *= prime32 - return h -} diff --git a/internal/cortex/ingester/client/ingester.pb.go b/internal/cortex/ingester/client/ingester.pb.go deleted file mode 100644 index fe103e677a..0000000000 --- a/internal/cortex/ingester/client/ingester.pb.go +++ /dev/null @@ -1,8466 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: ingester.proto - -// TODO: Rename to ingesterpb - -package client - -import ( - bytes "bytes" - context "context" - encoding_binary "encoding/binary" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - cortexpb "github.com/thanos-io/thanos/internal/cortex/cortexpb" - github_com_cortexproject_cortex_pkg_cortexpb "github.com/thanos-io/thanos/internal/cortex/cortexpb" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type MatchType int32 - -const ( - EQUAL MatchType = 0 - NOT_EQUAL MatchType = 1 - REGEX_MATCH MatchType = 2 - REGEX_NO_MATCH MatchType = 3 -) - -var MatchType_name = map[int32]string{ - 0: "EQUAL", - 1: "NOT_EQUAL", - 2: "REGEX_MATCH", - 3: "REGEX_NO_MATCH", -} - -var MatchType_value = map[string]int32{ - "EQUAL": 0, - "NOT_EQUAL": 1, - "REGEX_MATCH": 2, - "REGEX_NO_MATCH": 3, -} - -func (MatchType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{0} -} - -type ReadRequest struct { - Queries []*QueryRequest `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` -} - -func (m *ReadRequest) Reset() { *m = ReadRequest{} } -func (*ReadRequest) ProtoMessage() {} -func (*ReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{0} -} -func (m *ReadRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadRequest.Merge(m, src) -} -func (m *ReadRequest) XXX_Size() int { - return m.Size() -} -func (m *ReadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadRequest proto.InternalMessageInfo - -func (m *ReadRequest) GetQueries() []*QueryRequest { - if m != nil { - return m.Queries - } - return nil -} - -type ReadResponse struct { - Results []*QueryResponse `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` -} - -func (m *ReadResponse) Reset() { *m = ReadResponse{} } -func (*ReadResponse) ProtoMessage() {} -func (*ReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{1} -} -func (m *ReadResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResponse.Merge(m, src) -} -func (m *ReadResponse) XXX_Size() int { - return m.Size() -} -func (m *ReadResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadResponse proto.InternalMessageInfo - -func (m *ReadResponse) GetResults() []*QueryResponse { - if m != nil { - return m.Results - } - return nil -} - -type QueryRequest struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` -} - -func (m *QueryRequest) Reset() { *m = QueryRequest{} } -func (*QueryRequest) ProtoMessage() {} -func (*QueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{2} -} -func (m *QueryRequest) XXX_Unmarshal(b []byte) error { - 
return m.Unmarshal(b) -} -func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryRequest.Merge(m, src) -} -func (m *QueryRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryRequest proto.InternalMessageInfo - -func (m *QueryRequest) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *QueryRequest) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *QueryRequest) GetMatchers() []*LabelMatcher { - if m != nil { - return m.Matchers - } - return nil -} - -type ExemplarQueryRequest struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Matchers []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` -} - -func (m *ExemplarQueryRequest) Reset() { *m = ExemplarQueryRequest{} } -func (*ExemplarQueryRequest) ProtoMessage() {} -func (*ExemplarQueryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{3} -} -func (m *ExemplarQueryRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExemplarQueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExemplarQueryRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExemplarQueryRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExemplarQueryRequest.Merge(m, src) -} -func (m *ExemplarQueryRequest) XXX_Size() int { - return m.Size() -} -func (m *ExemplarQueryRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExemplarQueryRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExemplarQueryRequest proto.InternalMessageInfo - -func (m *ExemplarQueryRequest) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *ExemplarQueryRequest) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *ExemplarQueryRequest) GetMatchers() []*LabelMatchers { - if m != nil { - return m.Matchers - } - return nil -} - -type QueryResponse struct { - Timeseries []cortexpb.TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` -} - -func (m *QueryResponse) Reset() { *m = QueryResponse{} } -func (*QueryResponse) ProtoMessage() {} -func (*QueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{4} -} -func (m *QueryResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m 
*QueryResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryResponse.Merge(m, src) -} -func (m *QueryResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryResponse proto.InternalMessageInfo - -func (m *QueryResponse) GetTimeseries() []cortexpb.TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -// QueryStreamResponse contains a batch of timeseries chunks or timeseries. Only one of these series will be populated. -type QueryStreamResponse struct { - Chunkseries []TimeSeriesChunk `protobuf:"bytes,1,rep,name=chunkseries,proto3" json:"chunkseries"` - Timeseries []cortexpb.TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries"` -} - -func (m *QueryStreamResponse) Reset() { *m = QueryStreamResponse{} } -func (*QueryStreamResponse) ProtoMessage() {} -func (*QueryStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{5} -} -func (m *QueryStreamResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryStreamResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryStreamResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryStreamResponse.Merge(m, src) -} -func (m *QueryStreamResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryStreamResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryStreamResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryStreamResponse proto.InternalMessageInfo - -func (m *QueryStreamResponse) GetChunkseries() []TimeSeriesChunk { - if m != nil { - return m.Chunkseries - } - return nil -} - -func (m *QueryStreamResponse) GetTimeseries() []cortexpb.TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -type ExemplarQueryResponse struct { - Timeseries []cortexpb.TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` -} - -func (m *ExemplarQueryResponse) Reset() { *m = ExemplarQueryResponse{} } -func (*ExemplarQueryResponse) ProtoMessage() {} -func (*ExemplarQueryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{6} -} -func (m *ExemplarQueryResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExemplarQueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExemplarQueryResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExemplarQueryResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExemplarQueryResponse.Merge(m, src) -} -func (m *ExemplarQueryResponse) XXX_Size() int { - return m.Size() -} -func (m *ExemplarQueryResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExemplarQueryResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExemplarQueryResponse proto.InternalMessageInfo - -func (m *ExemplarQueryResponse) GetTimeseries() []cortexpb.TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -type LabelValuesRequest struct { - LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName,proto3" json:"label_name,omitempty"` - 
StartTimestampMs int64 `protobuf:"varint,2,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,3,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Matchers *LabelMatchers `protobuf:"bytes,4,opt,name=matchers,proto3" json:"matchers,omitempty"` -} - -func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } -func (*LabelValuesRequest) ProtoMessage() {} -func (*LabelValuesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{7} -} -func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelValuesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelValuesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValuesRequest.Merge(m, src) -} -func (m *LabelValuesRequest) XXX_Size() int { - return m.Size() -} -func (m *LabelValuesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValuesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValuesRequest proto.InternalMessageInfo - -func (m *LabelValuesRequest) GetLabelName() string { - if m != nil { - return m.LabelName - } - return "" -} - -func (m *LabelValuesRequest) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *LabelValuesRequest) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *LabelValuesRequest) GetMatchers() *LabelMatchers { - if m != nil { - return m.Matchers - } - return nil -} - -type LabelValuesResponse struct { - LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` -} - -func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } -func (*LabelValuesResponse) ProtoMessage() {} -func (*LabelValuesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{8} -} -func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelValuesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelValuesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValuesResponse.Merge(m, src) -} -func (m *LabelValuesResponse) XXX_Size() int { - return m.Size() -} -func (m *LabelValuesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValuesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValuesResponse proto.InternalMessageInfo - -func (m *LabelValuesResponse) GetLabelValues() []string { - if m != nil { - return m.LabelValues - } - return nil -} - -type LabelValuesStreamResponse struct { - LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` -} - -func (m *LabelValuesStreamResponse) Reset() { *m = LabelValuesStreamResponse{} } -func (*LabelValuesStreamResponse) ProtoMessage() {} -func (*LabelValuesStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{9} 
-} -func (m *LabelValuesStreamResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelValuesStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelValuesStreamResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelValuesStreamResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValuesStreamResponse.Merge(m, src) -} -func (m *LabelValuesStreamResponse) XXX_Size() int { - return m.Size() -} -func (m *LabelValuesStreamResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValuesStreamResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValuesStreamResponse proto.InternalMessageInfo - -func (m *LabelValuesStreamResponse) GetLabelValues() []string { - if m != nil { - return m.LabelValues - } - return nil -} - -type LabelNamesRequest struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` -} - -func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } -func (*LabelNamesRequest) ProtoMessage() {} -func (*LabelNamesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{10} -} -func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelNamesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelNamesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelNamesRequest.Merge(m, src) -} -func (m *LabelNamesRequest) XXX_Size() int { - return m.Size() -} -func (m *LabelNamesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LabelNamesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo - -func (m *LabelNamesRequest) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *LabelNamesRequest) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -type LabelNamesResponse struct { - LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` -} - -func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } -func (*LabelNamesResponse) ProtoMessage() {} -func (*LabelNamesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{11} -} -func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelNamesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelNamesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelNamesResponse.Merge(m, src) -} -func (m *LabelNamesResponse) XXX_Size() int { - return m.Size() -} -func (m *LabelNamesResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_LabelNamesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelNamesResponse proto.InternalMessageInfo - -func (m *LabelNamesResponse) GetLabelNames() []string { - if m != nil { - return m.LabelNames - } - return nil -} - -type LabelNamesStreamResponse struct { - LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` -} - -func (m *LabelNamesStreamResponse) Reset() { *m = LabelNamesStreamResponse{} } -func (*LabelNamesStreamResponse) ProtoMessage() {} -func (*LabelNamesStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{12} -} -func (m *LabelNamesStreamResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelNamesStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelNamesStreamResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelNamesStreamResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelNamesStreamResponse.Merge(m, src) -} -func (m *LabelNamesStreamResponse) XXX_Size() int { - return m.Size() -} -func (m *LabelNamesStreamResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LabelNamesStreamResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelNamesStreamResponse proto.InternalMessageInfo - -func (m *LabelNamesStreamResponse) GetLabelNames() []string { - if m != nil { - return m.LabelNames - } - return nil -} - -type UserStatsRequest struct { -} - -func (m *UserStatsRequest) Reset() { *m = UserStatsRequest{} } -func (*UserStatsRequest) ProtoMessage() {} -func (*UserStatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{13} -} -func (m *UserStatsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserStatsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UserStatsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserStatsRequest.Merge(m, src) -} -func (m *UserStatsRequest) XXX_Size() int { - return m.Size() -} -func (m *UserStatsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UserStatsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UserStatsRequest proto.InternalMessageInfo - -type UserStatsResponse struct { - IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate,proto3" json:"ingestion_rate,omitempty"` - NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries,proto3" json:"num_series,omitempty"` - ApiIngestionRate float64 `protobuf:"fixed64,3,opt,name=api_ingestion_rate,json=apiIngestionRate,proto3" json:"api_ingestion_rate,omitempty"` - RuleIngestionRate float64 `protobuf:"fixed64,4,opt,name=rule_ingestion_rate,json=ruleIngestionRate,proto3" json:"rule_ingestion_rate,omitempty"` -} - -func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} } -func (*UserStatsResponse) ProtoMessage() {} -func (*UserStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{14} -} -func (m *UserStatsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserStatsResponse) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserStatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UserStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserStatsResponse.Merge(m, src) -} -func (m *UserStatsResponse) XXX_Size() int { - return m.Size() -} -func (m *UserStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UserStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UserStatsResponse proto.InternalMessageInfo - -func (m *UserStatsResponse) GetIngestionRate() float64 { - if m != nil { - return m.IngestionRate - } - return 0 -} - -func (m *UserStatsResponse) GetNumSeries() uint64 { - if m != nil { - return m.NumSeries - } - return 0 -} - -func (m *UserStatsResponse) GetApiIngestionRate() float64 { - if m != nil { - return m.ApiIngestionRate - } - return 0 -} - -func (m *UserStatsResponse) GetRuleIngestionRate() float64 { - if m != nil { - return m.RuleIngestionRate - } - return 0 -} - -type UserIDStatsResponse struct { - UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *UserIDStatsResponse) Reset() { *m = UserIDStatsResponse{} } -func (*UserIDStatsResponse) ProtoMessage() {} -func (*UserIDStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{15} -} -func (m *UserIDStatsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserIDStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UserIDStatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UserIDStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserIDStatsResponse.Merge(m, src) -} -func (m *UserIDStatsResponse) XXX_Size() int { - return m.Size() -} -func (m *UserIDStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UserIDStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UserIDStatsResponse proto.InternalMessageInfo - -func (m *UserIDStatsResponse) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *UserIDStatsResponse) GetData() *UserStatsResponse { - if m != nil { - return m.Data - } - return nil -} - -type UsersStatsResponse struct { - Stats []*UserIDStatsResponse `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty"` -} - -func (m *UsersStatsResponse) Reset() { *m = UsersStatsResponse{} } -func (*UsersStatsResponse) ProtoMessage() {} -func (*UsersStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{16} -} -func (m *UsersStatsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UsersStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UsersStatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UsersStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UsersStatsResponse.Merge(m, src) -} -func (m *UsersStatsResponse) XXX_Size() int { - return m.Size() -} -func (m *UsersStatsResponse) 
XXX_DiscardUnknown() { - xxx_messageInfo_UsersStatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UsersStatsResponse proto.InternalMessageInfo - -func (m *UsersStatsResponse) GetStats() []*UserIDStatsResponse { - if m != nil { - return m.Stats - } - return nil -} - -type MetricsForLabelMatchersRequest struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - MatchersSet []*LabelMatchers `protobuf:"bytes,3,rep,name=matchers_set,json=matchersSet,proto3" json:"matchers_set,omitempty"` -} - -func (m *MetricsForLabelMatchersRequest) Reset() { *m = MetricsForLabelMatchersRequest{} } -func (*MetricsForLabelMatchersRequest) ProtoMessage() {} -func (*MetricsForLabelMatchersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{17} -} -func (m *MetricsForLabelMatchersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsForLabelMatchersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsForLabelMatchersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsForLabelMatchersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsForLabelMatchersRequest.Merge(m, src) -} -func (m *MetricsForLabelMatchersRequest) XXX_Size() int { - return m.Size() -} -func (m *MetricsForLabelMatchersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsForLabelMatchersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsForLabelMatchersRequest proto.InternalMessageInfo - -func (m *MetricsForLabelMatchersRequest) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *MetricsForLabelMatchersRequest) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *MetricsForLabelMatchersRequest) GetMatchersSet() []*LabelMatchers { - if m != nil { - return m.MatchersSet - } - return nil -} - -type MetricsForLabelMatchersResponse struct { - Metric []*cortexpb.Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"` -} - -func (m *MetricsForLabelMatchersResponse) Reset() { *m = MetricsForLabelMatchersResponse{} } -func (*MetricsForLabelMatchersResponse) ProtoMessage() {} -func (*MetricsForLabelMatchersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{18} -} -func (m *MetricsForLabelMatchersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsForLabelMatchersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsForLabelMatchersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsForLabelMatchersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsForLabelMatchersResponse.Merge(m, src) -} -func (m *MetricsForLabelMatchersResponse) XXX_Size() int { - return m.Size() -} -func (m *MetricsForLabelMatchersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsForLabelMatchersResponse.DiscardUnknown(m) -} - -var 
xxx_messageInfo_MetricsForLabelMatchersResponse proto.InternalMessageInfo - -func (m *MetricsForLabelMatchersResponse) GetMetric() []*cortexpb.Metric { - if m != nil { - return m.Metric - } - return nil -} - -type MetricsForLabelMatchersStreamResponse struct { - Metric []*cortexpb.Metric `protobuf:"bytes,1,rep,name=metric,proto3" json:"metric,omitempty"` -} - -func (m *MetricsForLabelMatchersStreamResponse) Reset() { *m = MetricsForLabelMatchersStreamResponse{} } -func (*MetricsForLabelMatchersStreamResponse) ProtoMessage() {} -func (*MetricsForLabelMatchersStreamResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{19} -} -func (m *MetricsForLabelMatchersStreamResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsForLabelMatchersStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsForLabelMatchersStreamResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsForLabelMatchersStreamResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsForLabelMatchersStreamResponse.Merge(m, src) -} -func (m *MetricsForLabelMatchersStreamResponse) XXX_Size() int { - return m.Size() -} -func (m *MetricsForLabelMatchersStreamResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsForLabelMatchersStreamResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsForLabelMatchersStreamResponse proto.InternalMessageInfo - -func (m *MetricsForLabelMatchersStreamResponse) GetMetric() []*cortexpb.Metric { - if m != nil { - return m.Metric - } - return nil -} - -type MetricsMetadataRequest struct { -} - -func (m *MetricsMetadataRequest) Reset() { *m = MetricsMetadataRequest{} } -func (*MetricsMetadataRequest) ProtoMessage() {} -func (*MetricsMetadataRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{20} -} -func (m *MetricsMetadataRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsMetadataRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsMetadataRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsMetadataRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsMetadataRequest.Merge(m, src) -} -func (m *MetricsMetadataRequest) XXX_Size() int { - return m.Size() -} -func (m *MetricsMetadataRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsMetadataRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsMetadataRequest proto.InternalMessageInfo - -type MetricsMetadataResponse struct { - Metadata []*cortexpb.MetricMetadata `protobuf:"bytes,1,rep,name=metadata,proto3" json:"metadata,omitempty"` -} - -func (m *MetricsMetadataResponse) Reset() { *m = MetricsMetadataResponse{} } -func (*MetricsMetadataResponse) ProtoMessage() {} -func (*MetricsMetadataResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{21} -} -func (m *MetricsMetadataResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsMetadataResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsMetadataResponse.Marshal(b, m, deterministic) - } else { - b = 
b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsMetadataResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsMetadataResponse.Merge(m, src) -} -func (m *MetricsMetadataResponse) XXX_Size() int { - return m.Size() -} -func (m *MetricsMetadataResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsMetadataResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsMetadataResponse proto.InternalMessageInfo - -func (m *MetricsMetadataResponse) GetMetadata() []*cortexpb.MetricMetadata { - if m != nil { - return m.Metadata - } - return nil -} - -type TimeSeriesChunk struct { - FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Labels []github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter `protobuf:"bytes,3,rep,name=labels,proto3,customtype=github.com/thanos-io/thanos/internal/cortex/cortexpb.LabelAdapter" json:"labels"` - Chunks []Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks"` -} - -func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } -func (*TimeSeriesChunk) ProtoMessage() {} -func (*TimeSeriesChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{22} -} -func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeriesChunk.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeriesChunk.Merge(m, src) -} -func (m *TimeSeriesChunk) XXX_Size() int { - return m.Size() -} -func (m *TimeSeriesChunk) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeriesChunk.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeriesChunk proto.InternalMessageInfo - -func (m *TimeSeriesChunk) GetFromIngesterId() string { - if m != nil { - return m.FromIngesterId - } - return "" -} - -func (m *TimeSeriesChunk) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *TimeSeriesChunk) GetChunks() []Chunk { - if m != nil { - return m.Chunks - } - return nil -} - -type Chunk struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Encoding int32 `protobuf:"varint,3,opt,name=encoding,proto3" json:"encoding,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *Chunk) Reset() { *m = Chunk{} } -func (*Chunk) ProtoMessage() {} -func (*Chunk) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{23} -} -func (m *Chunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Chunk) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_Chunk.Merge(m, src) -} -func (m *Chunk) XXX_Size() int { - return m.Size() -} -func (m *Chunk) XXX_DiscardUnknown() { - xxx_messageInfo_Chunk.DiscardUnknown(m) -} - -var xxx_messageInfo_Chunk proto.InternalMessageInfo - -func (m *Chunk) GetStartTimestampMs() int64 { - if m != nil { - return m.StartTimestampMs - } - return 0 -} - -func (m *Chunk) GetEndTimestampMs() int64 { - if m != nil { - return m.EndTimestampMs - } - return 0 -} - -func (m *Chunk) GetEncoding() int32 { - if m != nil { - return m.Encoding - } - return 0 -} - -func (m *Chunk) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type TransferChunksResponse struct { -} - -func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } -func (*TransferChunksResponse) ProtoMessage() {} -func (*TransferChunksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{24} -} -func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TransferChunksResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TransferChunksResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TransferChunksResponse.Merge(m, src) -} -func (m *TransferChunksResponse) XXX_Size() int { - return m.Size() -} -func (m *TransferChunksResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TransferChunksResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo - -type LabelMatchers struct { - Matchers []*LabelMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"` -} - -func (m *LabelMatchers) Reset() { *m = LabelMatchers{} } -func (*LabelMatchers) ProtoMessage() {} -func (*LabelMatchers) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{25} -} -func (m *LabelMatchers) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelMatchers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelMatchers.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelMatchers) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelMatchers.Merge(m, src) -} -func (m *LabelMatchers) XXX_Size() int { - return m.Size() -} -func (m *LabelMatchers) XXX_DiscardUnknown() { - xxx_messageInfo_LabelMatchers.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelMatchers proto.InternalMessageInfo - -func (m *LabelMatchers) GetMatchers() []*LabelMatcher { - if m != nil { - return m.Matchers - } - return nil -} - -type LabelMatcher struct { - Type MatchType `protobuf:"varint,1,opt,name=type,proto3,enum=cortex.MatchType" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } -func (*LabelMatcher) ProtoMessage() {} -func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{26} -} -func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelMatcher) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LabelMatcher) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelMatcher.Merge(m, src) -} -func (m *LabelMatcher) XXX_Size() int { - return m.Size() -} -func (m *LabelMatcher) XXX_DiscardUnknown() { - xxx_messageInfo_LabelMatcher.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo - -func (m *LabelMatcher) GetType() MatchType { - if m != nil { - return m.Type - } - return EQUAL -} - -func (m *LabelMatcher) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *LabelMatcher) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -type TimeSeriesFile struct { - FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` - UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - Filename string `protobuf:"bytes,3,opt,name=filename,proto3" json:"filename,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *TimeSeriesFile) Reset() { *m = TimeSeriesFile{} } -func (*TimeSeriesFile) ProtoMessage() {} -func (*TimeSeriesFile) Descriptor() ([]byte, []int) { - return fileDescriptor_60f6df4f3586b478, []int{27} -} -func (m *TimeSeriesFile) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeriesFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeriesFile.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeriesFile) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeriesFile.Merge(m, src) -} -func (m *TimeSeriesFile) XXX_Size() int { - return m.Size() -} -func (m *TimeSeriesFile) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeriesFile.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeriesFile proto.InternalMessageInfo - -func (m *TimeSeriesFile) GetFromIngesterId() string { - if m != nil { - return m.FromIngesterId - } - return "" -} - -func (m *TimeSeriesFile) GetUserId() string { - if m != nil { - return m.UserId - } - return "" -} - -func (m *TimeSeriesFile) GetFilename() string { - if m != nil { - return m.Filename - } - return "" -} - -func (m *TimeSeriesFile) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func init() { - proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value) - proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest") - proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse") - proto.RegisterType((*QueryRequest)(nil), "cortex.QueryRequest") - proto.RegisterType((*ExemplarQueryRequest)(nil), "cortex.ExemplarQueryRequest") - proto.RegisterType((*QueryResponse)(nil), "cortex.QueryResponse") - proto.RegisterType((*QueryStreamResponse)(nil), "cortex.QueryStreamResponse") - proto.RegisterType((*ExemplarQueryResponse)(nil), "cortex.ExemplarQueryResponse") - proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest") - proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse") - proto.RegisterType((*LabelValuesStreamResponse)(nil), "cortex.LabelValuesStreamResponse") - 
proto.RegisterType((*LabelNamesRequest)(nil), "cortex.LabelNamesRequest") - proto.RegisterType((*LabelNamesResponse)(nil), "cortex.LabelNamesResponse") - proto.RegisterType((*LabelNamesStreamResponse)(nil), "cortex.LabelNamesStreamResponse") - proto.RegisterType((*UserStatsRequest)(nil), "cortex.UserStatsRequest") - proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse") - proto.RegisterType((*UserIDStatsResponse)(nil), "cortex.UserIDStatsResponse") - proto.RegisterType((*UsersStatsResponse)(nil), "cortex.UsersStatsResponse") - proto.RegisterType((*MetricsForLabelMatchersRequest)(nil), "cortex.MetricsForLabelMatchersRequest") - proto.RegisterType((*MetricsForLabelMatchersResponse)(nil), "cortex.MetricsForLabelMatchersResponse") - proto.RegisterType((*MetricsForLabelMatchersStreamResponse)(nil), "cortex.MetricsForLabelMatchersStreamResponse") - proto.RegisterType((*MetricsMetadataRequest)(nil), "cortex.MetricsMetadataRequest") - proto.RegisterType((*MetricsMetadataResponse)(nil), "cortex.MetricsMetadataResponse") - proto.RegisterType((*TimeSeriesChunk)(nil), "cortex.TimeSeriesChunk") - proto.RegisterType((*Chunk)(nil), "cortex.Chunk") - proto.RegisterType((*TransferChunksResponse)(nil), "cortex.TransferChunksResponse") - proto.RegisterType((*LabelMatchers)(nil), "cortex.LabelMatchers") - proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher") - proto.RegisterType((*TimeSeriesFile)(nil), "cortex.TimeSeriesFile") -} - -func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) } - -var fileDescriptor_60f6df4f3586b478 = []byte{ - // 1327 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcf, 0x6f, 0xd4, 0xc6, - 0x17, 0xf7, 0x24, 0x9b, 0x25, 0xfb, 0x76, 0xb3, 0x6c, 0x26, 0x40, 0x16, 0xf3, 0xc5, 0x01, 0x4b, - 0x7c, 0x1b, 0xb5, 0x25, 0x81, 0xf4, 0x87, 0xa0, 0xbf, 0x50, 0x02, 0x01, 0x52, 0x08, 0x01, 0xef, - 0x42, 0xab, 0x4a, 0x95, 0xe5, 0xdd, 0x9d, 0x6c, 0x5c, 0xfc, 0x0b, 0xcf, 0xb8, 0x82, 0x9e, 0x2a, - 0xf5, 0x0f, 0x68, 0xd5, 0x53, 0xaf, 0xbd, 0xf5, 0xdc, 0x4b, 0x6f, 0x3d, 0x73, 0xe4, 0x88, 0x7a, - 0x40, 0x65, 0xb9, 0xf4, 0x52, 0x89, 0xfe, 0x07, 0x95, 0xc7, 0x63, 0xaf, 0xed, 0x78, 0x93, 0x45, - 0x22, 0xbd, 0xad, 0xdf, 0xfb, 0xbc, 0xcf, 0x7c, 0x66, 0xde, 0x9b, 0x79, 0x2f, 0x81, 0xba, 0xe9, - 0xf4, 0x09, 0x65, 0xc4, 0x5f, 0xf2, 0x7c, 0x97, 0xb9, 0xb8, 0xdc, 0x75, 0x7d, 0x46, 0x1e, 0xca, - 0x67, 0xfb, 0x26, 0xdb, 0x09, 0x3a, 0x4b, 0x5d, 0xd7, 0x5e, 0xee, 0xbb, 0x7d, 0x77, 0x99, 0xbb, - 0x3b, 0xc1, 0x36, 0xff, 0xe2, 0x1f, 0xfc, 0x57, 0x14, 0x26, 0x5f, 0x4c, 0xc1, 0x23, 0x06, 0xcf, - 0x77, 0xbf, 0x22, 0x5d, 0x26, 0xbe, 0x96, 0xbd, 0xfb, 0xfd, 0xd8, 0xd1, 0x11, 0x3f, 0xa2, 0x50, - 0xf5, 0x63, 0xa8, 0x6a, 0xc4, 0xe8, 0x69, 0xe4, 0x41, 0x40, 0x28, 0xc3, 0x4b, 0x70, 0xe8, 0x41, - 0x40, 0x7c, 0x93, 0xd0, 0x26, 0x3a, 0x35, 0xb9, 0x58, 0x5d, 0x39, 0xb2, 0x24, 0xe0, 0x77, 0x02, - 0xe2, 0x3f, 0x12, 0x30, 0x2d, 0x06, 0xa9, 0x97, 0xa0, 0x16, 0x85, 0x53, 0xcf, 0x75, 0x28, 0xc1, - 0xcb, 0x70, 0xc8, 0x27, 0x34, 0xb0, 0x58, 0x1c, 0x7f, 0x34, 0x17, 0x1f, 0xe1, 0xb4, 0x18, 0xa5, - 0xfe, 0x84, 0xa0, 0x96, 0xa6, 0xc6, 0x6f, 0x03, 0xa6, 0xcc, 0xf0, 0x99, 0xce, 0x4c, 0x9b, 0x50, - 0x66, 0xd8, 0x9e, 0x6e, 0x87, 0x64, 0x68, 0x71, 0x52, 0x6b, 0x70, 0x4f, 0x3b, 0x76, 0x6c, 0x52, - 0xbc, 0x08, 0x0d, 0xe2, 0xf4, 0xb2, 0xd8, 0x09, 0x8e, 0xad, 0x13, 0xa7, 0x97, 0x46, 0x9e, 0x83, - 0x69, 0xdb, 0x60, 0xdd, 0x1d, 0xe2, 0xd3, 0xe6, 0x64, 0x76, 0x6b, 0x37, 0x8d, 0x0e, 0xb1, 0x36, - 0x23, 0xa7, 0x96, 0xa0, 0xd4, 0x9f, 
0x11, 0x1c, 0x59, 0x7f, 0x48, 0x6c, 0xcf, 0x32, 0xfc, 0xff, - 0x44, 0xe2, 0xf9, 0x5d, 0x12, 0x8f, 0x16, 0x49, 0xa4, 0x29, 0x8d, 0x37, 0x60, 0x26, 0x73, 0xb0, - 0xf8, 0x03, 0x00, 0xbe, 0x52, 0x51, 0x0e, 0xbd, 0xce, 0x52, 0xb8, 0x5c, 0x8b, 0xfb, 0xd6, 0x4a, - 0x8f, 0x9f, 0x2d, 0x48, 0x5a, 0x0a, 0xad, 0xfe, 0x88, 0x60, 0x8e, 0xb3, 0xb5, 0x98, 0x4f, 0x0c, - 0x3b, 0xe1, 0xbc, 0x04, 0xd5, 0xee, 0x4e, 0xe0, 0xdc, 0xcf, 0x90, 0xce, 0xc7, 0xd2, 0x86, 0x94, - 0x97, 0x43, 0x90, 0xe0, 0x4d, 0x47, 0xe4, 0x44, 0x4d, 0xbc, 0x92, 0xa8, 0x16, 0x1c, 0xcd, 0x25, - 0xe1, 0x35, 0xec, 0xf4, 0x77, 0x04, 0x98, 0x1f, 0xe9, 0x3d, 0xc3, 0x0a, 0x08, 0x8d, 0x13, 0x7b, - 0x12, 0xc0, 0x0a, 0xad, 0xba, 0x63, 0xd8, 0x84, 0x27, 0xb4, 0xa2, 0x55, 0xb8, 0xe5, 0x96, 0x61, - 0x93, 0x11, 0x79, 0x9f, 0x78, 0x85, 0xbc, 0x4f, 0xee, 0x9b, 0xf7, 0xd2, 0x29, 0x34, 0x4e, 0xde, - 0x2f, 0xc0, 0x5c, 0x46, 0xbf, 0x38, 0x93, 0xd3, 0x50, 0x8b, 0x36, 0xf0, 0x35, 0xb7, 0xf3, 0x53, - 0xa9, 0x68, 0x55, 0x6b, 0x08, 0x55, 0x3f, 0x81, 0xe3, 0xa9, 0xc8, 0x5c, 0xa6, 0xc7, 0x88, 0xbf, - 0x0f, 0xb3, 0x37, 0xe3, 0x13, 0xa1, 0x07, 0x7c, 0x23, 0xd4, 0xf7, 0x44, 0x9a, 0xc4, 0x62, 0x42, - 0xe5, 0x02, 0x54, 0x87, 0x69, 0x8a, 0x45, 0x42, 0x92, 0x27, 0xaa, 0x7e, 0x08, 0xcd, 0x61, 0x58, - 0x6e, 0x8b, 0xfb, 0x06, 0x63, 0x68, 0xdc, 0xa5, 0xc4, 0x6f, 0x31, 0x83, 0xc5, 0xfb, 0x53, 0x7f, - 0x43, 0x30, 0x9b, 0x32, 0x0a, 0xaa, 0x33, 0xf1, 0xfb, 0x6d, 0xba, 0x8e, 0xee, 0x1b, 0x2c, 0x2a, - 0x19, 0xa4, 0xcd, 0x24, 0x56, 0xcd, 0x60, 0x24, 0xac, 0x2a, 0x27, 0xb0, 0xf5, 0xa4, 0xfa, 0xd1, - 0x62, 0x49, 0xab, 0x38, 0x81, 0x1d, 0x55, 0x67, 0x78, 0x76, 0x86, 0x67, 0xea, 0x39, 0xa6, 0x49, - 0xce, 0xd4, 0x30, 0x3c, 0x73, 0x23, 0x43, 0xb6, 0x04, 0x73, 0x7e, 0x60, 0x91, 0x3c, 0xbc, 0xc4, - 0xe1, 0xb3, 0xa1, 0x2b, 0x83, 0x57, 0xbf, 0x84, 0xb9, 0x50, 0xf8, 0xc6, 0x95, 0xac, 0xf4, 0x79, - 0x38, 0x14, 0x50, 0xe2, 0xeb, 0x66, 0x4f, 0x94, 0x79, 0x39, 0xfc, 0xdc, 0xe8, 0xe1, 0xb3, 0x50, - 0xea, 0x19, 0xcc, 0xe0, 0x32, 0xab, 0x2b, 0xc7, 0xe3, 0x3a, 0xdc, 0xb5, 0x79, 0x8d, 0xc3, 0xd4, - 0x6b, 0x80, 0x43, 0x17, 0xcd, 0xb2, 0x9f, 0x87, 0x29, 0x1a, 0x1a, 0xc4, 0xad, 0x3c, 0x91, 0x66, - 0xc9, 0x29, 0xd1, 0x22, 0xa4, 0xfa, 0x2b, 0x02, 0x65, 0x93, 0x30, 0xdf, 0xec, 0xd2, 0xab, 0xae, - 0x9f, 0x2d, 0xfb, 0x03, 0x7e, 0x76, 0x2f, 0x40, 0x2d, 0xbe, 0x57, 0x3a, 0x25, 0x6c, 0xef, 0xa7, - 0xb7, 0x1a, 0x43, 0x5b, 0x84, 0xa9, 0x37, 0x60, 0x61, 0xa4, 0x66, 0x71, 0x14, 0x8b, 0x50, 0xb6, - 0x39, 0x44, 0x9c, 0x45, 0x63, 0xf8, 0x42, 0x45, 0xa1, 0x9a, 0xf0, 0xab, 0x77, 0xe0, 0xcc, 0x08, - 0xb2, 0x5c, 0x05, 0x8f, 0x4f, 0xd9, 0x84, 0x63, 0x82, 0x72, 0x93, 0x30, 0x23, 0x4c, 0x58, 0x5c, - 0xd0, 0x5b, 0x30, 0xbf, 0xcb, 0x23, 0xe8, 0xdf, 0x85, 0x69, 0x5b, 0xd8, 0xc4, 0x02, 0xcd, 0xfc, - 0x02, 0x49, 0x4c, 0x82, 0x54, 0xff, 0x41, 0x70, 0x38, 0xd7, 0x09, 0xc2, 0x14, 0x6c, 0xfb, 0xae, - 0xad, 0xc7, 0x43, 0xce, 0xb0, 0xda, 0xea, 0xa1, 0x7d, 0x43, 0x98, 0x37, 0x7a, 0xe9, 0x72, 0x9c, - 0xc8, 0x94, 0xa3, 0x03, 0x65, 0x7e, 0x35, 0xe3, 0x86, 0x38, 0x37, 0x94, 0xc2, 0x8f, 0xe8, 0xb6, - 0x61, 0xfa, 0x6b, 0xab, 0xe1, 0xfb, 0xfe, 0xc7, 0xb3, 0x85, 0x57, 0x1a, 0x83, 0xa2, 0xf8, 0xd5, - 0x9e, 0xe1, 0x31, 0xe2, 0x6b, 0x62, 0x15, 0xfc, 0x16, 0x94, 0xa3, 0xc6, 0xd5, 0x2c, 0xf1, 0xf5, - 0x66, 0xe2, 0x2a, 0x48, 0xf7, 0x36, 0x01, 0x51, 0xbf, 0x47, 0x30, 0x15, 0xed, 0xf4, 0xa0, 0x4a, - 0x53, 0x86, 0x69, 0xe2, 0x74, 0xdd, 0x9e, 0xe9, 0xf4, 0xf9, 0x8b, 0x30, 0xa5, 0x25, 0xdf, 0x18, - 0x8b, 0x9b, 0x1a, 0x5e, 0xfd, 0x9a, 0xb8, 0x8e, 0x4d, 0x38, 0xd6, 0xf6, 0x0d, 0x87, 0x6e, 0x13, - 0x9f, 0x0b, 0x4b, 0xea, 0x50, 0x5d, 0x85, 0x99, 0x4c, 0x4d, 
0x65, 0xe6, 0x21, 0x34, 0xd6, 0x3c, - 0xa4, 0x43, 0x2d, 0xed, 0xc1, 0x67, 0xa0, 0xc4, 0x1e, 0x79, 0xd1, 0xa3, 0x57, 0x5f, 0x99, 0x8d, - 0xa3, 0xb9, 0xbb, 0xfd, 0xc8, 0x23, 0x1a, 0x77, 0x87, 0x3a, 0x79, 0x3b, 0x8d, 0x12, 0xcb, 0x7f, - 0xe3, 0x23, 0x30, 0xc5, 0x3b, 0x0c, 0xdf, 0x54, 0x45, 0x8b, 0x3e, 0xd4, 0xef, 0x10, 0xd4, 0x87, - 0x35, 0x74, 0xd5, 0xb4, 0xc8, 0xeb, 0x28, 0x21, 0x19, 0xa6, 0xb7, 0x4d, 0x8b, 0x70, 0x0d, 0xd1, - 0x72, 0xc9, 0x77, 0xd1, 0x19, 0xbe, 0xf9, 0x29, 0x54, 0x92, 0x2d, 0xe0, 0x0a, 0x4c, 0xad, 0xdf, - 0xb9, 0xbb, 0x7a, 0xb3, 0x21, 0xe1, 0x19, 0xa8, 0xdc, 0xda, 0x6a, 0xeb, 0xd1, 0x27, 0xc2, 0x87, - 0xa1, 0xaa, 0xad, 0x5f, 0x5b, 0xff, 0x5c, 0xdf, 0x5c, 0x6d, 0x5f, 0xbe, 0xde, 0x98, 0xc0, 0x18, - 0xea, 0x91, 0xe1, 0xd6, 0x96, 0xb0, 0x4d, 0xae, 0xfc, 0x3d, 0x0d, 0xd3, 0xb1, 0x46, 0x7c, 0x11, - 0x4a, 0xb7, 0x03, 0xba, 0x83, 0x8f, 0x0d, 0x6b, 0xf8, 0x33, 0xdf, 0x64, 0x44, 0xdc, 0x49, 0x79, - 0x7e, 0x97, 0x5d, 0xe4, 0x4e, 0xc2, 0xef, 0xc3, 0x14, 0x1f, 0x7e, 0x70, 0xe1, 0x38, 0x2e, 0x17, - 0x0f, 0xd9, 0xaa, 0x84, 0xaf, 0x40, 0x35, 0x35, 0xd0, 0x8d, 0x88, 0x3e, 0x91, 0xb1, 0x66, 0x1f, - 0x1b, 0x55, 0x3a, 0x87, 0xf0, 0x16, 0xd4, 0xb9, 0x2b, 0x9e, 0xc3, 0x28, 0xfe, 0x5f, 0x1c, 0x52, - 0x34, 0x1f, 0xcb, 0x27, 0x47, 0x78, 0x13, 0x59, 0xd7, 0xa1, 0x9a, 0x9a, 0x41, 0xb0, 0x9c, 0x29, - 0xbc, 0xcc, 0x48, 0x36, 0x14, 0x57, 0x30, 0xee, 0xa8, 0x12, 0xbe, 0x27, 0xa6, 0x91, 0xf4, 0x34, - 0xb3, 0x27, 0xdf, 0xe9, 0x02, 0x5f, 0xc1, 0x96, 0xd7, 0x01, 0x86, 0x13, 0x04, 0x3e, 0x9e, 0x09, - 0x4a, 0x4f, 0x3e, 0xb2, 0x5c, 0xe4, 0x4a, 0xe4, 0xb5, 0xa0, 0x91, 0x1f, 0x44, 0xf6, 0x22, 0x3b, - 0xb5, 0xdb, 0x55, 0xa0, 0x6d, 0x0d, 0x2a, 0x49, 0x3b, 0xc6, 0xcd, 0x82, 0x0e, 0x1d, 0x91, 0x8d, - 0xee, 0xdd, 0xaa, 0x84, 0xaf, 0x42, 0x6d, 0xd5, 0xb2, 0xc6, 0xa1, 0x91, 0xd3, 0x1e, 0x9a, 0xe7, - 0xb1, 0x92, 0x3e, 0x92, 0x6f, 0x5a, 0xf8, 0xff, 0xc9, 0x83, 0xb0, 0x67, 0x5b, 0x97, 0xdf, 0xd8, - 0x17, 0x97, 0xac, 0xf6, 0x0d, 0x9c, 0xdc, 0xb3, 0x45, 0x8e, 0xbd, 0xe6, 0xd9, 0x7d, 0x70, 0x05, - 0xa7, 0xde, 0x86, 0xc3, 0xb9, 0x8e, 0x89, 0x95, 0x1c, 0x4b, 0xae, 0xc9, 0xca, 0x0b, 0x23, 0xfd, - 0xc9, 0x8e, 0x36, 0xa1, 0x9e, 0x7d, 0xb0, 0xf1, 0xa8, 0xbf, 0xab, 0xe4, 0x64, 0xb5, 0x11, 0x2f, - 0xbc, 0xb4, 0x88, 0xd6, 0x3e, 0x7a, 0xf2, 0x5c, 0x91, 0x9e, 0x3e, 0x57, 0xa4, 0x97, 0xcf, 0x15, - 0xf4, 0xed, 0x40, 0x41, 0xbf, 0x0c, 0x14, 0xf4, 0x78, 0xa0, 0xa0, 0x27, 0x03, 0x05, 0xfd, 0x39, - 0x50, 0xd0, 0x5f, 0x03, 0x45, 0x7a, 0x39, 0x50, 0xd0, 0x0f, 0x2f, 0x14, 0xe9, 0xc9, 0x0b, 0x45, - 0x7a, 0xfa, 0x42, 0x91, 0xbe, 0x28, 0x77, 0x2d, 0x93, 0x38, 0xac, 0x53, 0xe6, 0xff, 0x12, 0x78, - 0xe7, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xb8, 0x31, 0x07, 0x96, 0x10, 0x00, 0x00, -} - -func (x MatchType) String() string { - s, ok := MatchType_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (this *ReadRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ReadRequest) - if !ok { - that2, ok := that.(ReadRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Queries) != len(that1.Queries) { - return false - } - for i := range this.Queries { - if !this.Queries[i].Equal(that1.Queries[i]) { - return false - } - } - return true -} -func (this *ReadResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ReadResponse) - if !ok { - that2, ok := that.(ReadResponse) - if ok { - that1 = &that2 - } else { - 
return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Results) != len(that1.Results) { - return false - } - for i := range this.Results { - if !this.Results[i].Equal(that1.Results[i]) { - return false - } - } - return true -} -func (this *QueryRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryRequest) - if !ok { - that2, ok := that.(QueryRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if len(this.Matchers) != len(that1.Matchers) { - return false - } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false - } - } - return true -} -func (this *ExemplarQueryRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ExemplarQueryRequest) - if !ok { - that2, ok := that.(ExemplarQueryRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if len(this.Matchers) != len(that1.Matchers) { - return false - } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false - } - } - return true -} -func (this *QueryResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryResponse) - if !ok { - that2, ok := that.(QueryResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Timeseries) != len(that1.Timeseries) { - return false - } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { - return false - } - } - return true -} -func (this *QueryStreamResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QueryStreamResponse) - if !ok { - that2, ok := that.(QueryStreamResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Chunkseries) != len(that1.Chunkseries) { - return false - } - for i := range this.Chunkseries { - if !this.Chunkseries[i].Equal(&that1.Chunkseries[i]) { - return false - } - } - if len(this.Timeseries) != len(that1.Timeseries) { - return false - } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { - return false - } - } - return true -} -func (this *ExemplarQueryResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ExemplarQueryResponse) - if !ok { - that2, ok := that.(ExemplarQueryResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Timeseries) != len(that1.Timeseries) { - return false - } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(&that1.Timeseries[i]) { - return false - } - } - return true -} -func (this *LabelValuesRequest) Equal(that 
interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelValuesRequest) - if !ok { - that2, ok := that.(LabelValuesRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.LabelName != that1.LabelName { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if !this.Matchers.Equal(that1.Matchers) { - return false - } - return true -} -func (this *LabelValuesResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelValuesResponse) - if !ok { - that2, ok := that.(LabelValuesResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.LabelValues) != len(that1.LabelValues) { - return false - } - for i := range this.LabelValues { - if this.LabelValues[i] != that1.LabelValues[i] { - return false - } - } - return true -} -func (this *LabelValuesStreamResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelValuesStreamResponse) - if !ok { - that2, ok := that.(LabelValuesStreamResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.LabelValues) != len(that1.LabelValues) { - return false - } - for i := range this.LabelValues { - if this.LabelValues[i] != that1.LabelValues[i] { - return false - } - } - return true -} -func (this *LabelNamesRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelNamesRequest) - if !ok { - that2, ok := that.(LabelNamesRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - return true -} -func (this *LabelNamesResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelNamesResponse) - if !ok { - that2, ok := that.(LabelNamesResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.LabelNames) != len(that1.LabelNames) { - return false - } - for i := range this.LabelNames { - if this.LabelNames[i] != that1.LabelNames[i] { - return false - } - } - return true -} -func (this *LabelNamesStreamResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelNamesStreamResponse) - if !ok { - that2, ok := that.(LabelNamesStreamResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.LabelNames) != len(that1.LabelNames) { - return false - } - for i := range this.LabelNames { - if this.LabelNames[i] != that1.LabelNames[i] { - return false - } - } - return true -} -func (this *UserStatsRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UserStatsRequest) - if !ok { - that2, ok := that.(UserStatsRequest) - if ok { - 
that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *UserStatsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UserStatsResponse) - if !ok { - that2, ok := that.(UserStatsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.IngestionRate != that1.IngestionRate { - return false - } - if this.NumSeries != that1.NumSeries { - return false - } - if this.ApiIngestionRate != that1.ApiIngestionRate { - return false - } - if this.RuleIngestionRate != that1.RuleIngestionRate { - return false - } - return true -} -func (this *UserIDStatsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UserIDStatsResponse) - if !ok { - that2, ok := that.(UserIDStatsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.UserId != that1.UserId { - return false - } - if !this.Data.Equal(that1.Data) { - return false - } - return true -} -func (this *UsersStatsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*UsersStatsResponse) - if !ok { - that2, ok := that.(UsersStatsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Stats) != len(that1.Stats) { - return false - } - for i := range this.Stats { - if !this.Stats[i].Equal(that1.Stats[i]) { - return false - } - } - return true -} -func (this *MetricsForLabelMatchersRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*MetricsForLabelMatchersRequest) - if !ok { - that2, ok := that.(MetricsForLabelMatchersRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if len(this.MatchersSet) != len(that1.MatchersSet) { - return false - } - for i := range this.MatchersSet { - if !this.MatchersSet[i].Equal(that1.MatchersSet[i]) { - return false - } - } - return true -} -func (this *MetricsForLabelMatchersResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*MetricsForLabelMatchersResponse) - if !ok { - that2, ok := that.(MetricsForLabelMatchersResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Metric) != len(that1.Metric) { - return false - } - for i := range this.Metric { - if !this.Metric[i].Equal(that1.Metric[i]) { - return false - } - } - return true -} -func (this *MetricsForLabelMatchersStreamResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*MetricsForLabelMatchersStreamResponse) - if !ok { - that2, ok := that.(MetricsForLabelMatchersStreamResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Metric) != 
len(that1.Metric) { - return false - } - for i := range this.Metric { - if !this.Metric[i].Equal(that1.Metric[i]) { - return false - } - } - return true -} -func (this *MetricsMetadataRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*MetricsMetadataRequest) - if !ok { - that2, ok := that.(MetricsMetadataRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *MetricsMetadataResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*MetricsMetadataResponse) - if !ok { - that2, ok := that.(MetricsMetadataResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Metadata) != len(that1.Metadata) { - return false - } - for i := range this.Metadata { - if !this.Metadata[i].Equal(that1.Metadata[i]) { - return false - } - } - return true -} -func (this *TimeSeriesChunk) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TimeSeriesChunk) - if !ok { - that2, ok := that.(TimeSeriesChunk) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.FromIngesterId != that1.FromIngesterId { - return false - } - if this.UserId != that1.UserId { - return false - } - if len(this.Labels) != len(that1.Labels) { - return false - } - for i := range this.Labels { - if !this.Labels[i].Equal(that1.Labels[i]) { - return false - } - } - if len(this.Chunks) != len(that1.Chunks) { - return false - } - for i := range this.Chunks { - if !this.Chunks[i].Equal(&that1.Chunks[i]) { - return false - } - } - return true -} -func (this *Chunk) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Chunk) - if !ok { - that2, ok := that.(Chunk) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.StartTimestampMs != that1.StartTimestampMs { - return false - } - if this.EndTimestampMs != that1.EndTimestampMs { - return false - } - if this.Encoding != that1.Encoding { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *TransferChunksResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TransferChunksResponse) - if !ok { - that2, ok := that.(TransferChunksResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *LabelMatchers) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelMatchers) - if !ok { - that2, ok := that.(LabelMatchers) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Matchers) != len(that1.Matchers) { - return false - } - for i := range this.Matchers { - if !this.Matchers[i].Equal(that1.Matchers[i]) { - return false - } - } - return true -} -func (this *LabelMatcher) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LabelMatcher) - if !ok 
{ - that2, ok := that.(LabelMatcher) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if this.Name != that1.Name { - return false - } - if this.Value != that1.Value { - return false - } - return true -} -func (this *TimeSeriesFile) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TimeSeriesFile) - if !ok { - that2, ok := that.(TimeSeriesFile) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.FromIngesterId != that1.FromIngesterId { - return false - } - if this.UserId != that1.UserId { - return false - } - if this.Filename != that1.Filename { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *ReadRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.ReadRequest{") - if this.Queries != nil { - s = append(s, "Queries: "+fmt.Sprintf("%#v", this.Queries)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ReadResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.ReadResponse{") - if this.Results != nil { - s = append(s, "Results: "+fmt.Sprintf("%#v", this.Results)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&client.QueryRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ExemplarQueryRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&client.ExemplarQueryRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.QueryResponse{") - if this.Timeseries != nil { - vs := make([]*cortexpb.TimeSeries, len(this.Timeseries)) - for i := range vs { - vs[i] = &this.Timeseries[i] - } - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QueryStreamResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.QueryStreamResponse{") - if this.Chunkseries != nil { - vs := make([]*TimeSeriesChunk, len(this.Chunkseries)) - for i := range vs { - vs[i] = &this.Chunkseries[i] - } - s = append(s, "Chunkseries: "+fmt.Sprintf("%#v", vs)+",\n") - } - if this.Timeseries != nil { - vs := make([]*cortexpb.TimeSeries, len(this.Timeseries)) - for i := range vs { - vs[i] = &this.Timeseries[i] - } - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = 
append(s, "}") - return strings.Join(s, "") -} -func (this *ExemplarQueryResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.ExemplarQueryResponse{") - if this.Timeseries != nil { - vs := make([]*cortexpb.TimeSeries, len(this.Timeseries)) - for i := range vs { - vs[i] = &this.Timeseries[i] - } - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValuesRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.LabelValuesRequest{") - s = append(s, "LabelName: "+fmt.Sprintf("%#v", this.LabelName)+",\n") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValuesResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelValuesResponse{") - s = append(s, "LabelValues: "+fmt.Sprintf("%#v", this.LabelValues)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelValuesStreamResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelValuesStreamResponse{") - s = append(s, "LabelValues: "+fmt.Sprintf("%#v", this.LabelValues)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelNamesRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&client.LabelNamesRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelNamesResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelNamesResponse{") - s = append(s, "LabelNames: "+fmt.Sprintf("%#v", this.LabelNames)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelNamesStreamResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelNamesStreamResponse{") - s = append(s, "LabelNames: "+fmt.Sprintf("%#v", this.LabelNames)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserStatsRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.UserStatsRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserStatsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.UserStatsResponse{") - s = append(s, "IngestionRate: "+fmt.Sprintf("%#v", this.IngestionRate)+",\n") - s = append(s, "NumSeries: "+fmt.Sprintf("%#v", this.NumSeries)+",\n") - s = append(s, "ApiIngestionRate: "+fmt.Sprintf("%#v", this.ApiIngestionRate)+",\n") - s = append(s, "RuleIngestionRate: "+fmt.Sprintf("%#v", this.RuleIngestionRate)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UserIDStatsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, 
"&client.UserIDStatsResponse{") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - if this.Data != nil { - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UsersStatsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.UsersStatsResponse{") - if this.Stats != nil { - s = append(s, "Stats: "+fmt.Sprintf("%#v", this.Stats)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsForLabelMatchersRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&client.MetricsForLabelMatchersRequest{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - if this.MatchersSet != nil { - s = append(s, "MatchersSet: "+fmt.Sprintf("%#v", this.MatchersSet)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsForLabelMatchersResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.MetricsForLabelMatchersResponse{") - if this.Metric != nil { - s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsForLabelMatchersStreamResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.MetricsForLabelMatchersStreamResponse{") - if this.Metric != nil { - s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsMetadataRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&client.MetricsMetadataRequest{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MetricsMetadataResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.MetricsMetadataResponse{") - if this.Metadata != nil { - s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TimeSeriesChunk) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.TimeSeriesChunk{") - s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") - if this.Chunks != nil { - vs := make([]*Chunk, len(this.Chunks)) - for i := range vs { - vs[i] = &this.Chunks[i] - } - s = append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Chunk) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.Chunk{") - s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n") - s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n") - s = append(s, "Encoding: "+fmt.Sprintf("%#v", this.Encoding)+",\n") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TransferChunksResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s 
= append(s, "&client.TransferChunksResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelMatchers) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&client.LabelMatchers{") - if this.Matchers != nil { - s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LabelMatcher) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&client.LabelMatcher{") - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TimeSeriesFile) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&client.TimeSeriesFile{") - s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") - s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") - s = append(s, "Filename: "+fmt.Sprintf("%#v", this.Filename)+",\n") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringIngester(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// IngesterClient is the client API for Ingester service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type IngesterClient interface { - Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) - Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) - QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) - QueryExemplars(ctx context.Context, in *ExemplarQueryRequest, opts ...grpc.CallOption) (*ExemplarQueryResponse, error) - LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) - LabelValuesStream(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (Ingester_LabelValuesStreamClient, error) - LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) - LabelNamesStream(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (Ingester_LabelNamesStreamClient, error) - UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) - AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) - MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) - MetricsForLabelMatchersStream(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (Ingester_MetricsForLabelMatchersStreamClient, error) - MetricsMetadata(ctx context.Context, in *MetricsMetadataRequest, opts ...grpc.CallOption) (*MetricsMetadataResponse, error) - // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). - TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) -} - -type ingesterClient struct { - cc *grpc.ClientConn -} - -func NewIngesterClient(cc *grpc.ClientConn) IngesterClient { - return &ingesterClient{cc} -} - -func (c *ingesterClient) Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) { - out := new(cortexpb.WriteResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/Push", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) { - out := new(QueryResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/Query", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/cortex.Ingester/QueryStream", opts...) 
- if err != nil { - return nil, err - } - x := &ingesterQueryStreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Ingester_QueryStreamClient interface { - Recv() (*QueryStreamResponse, error) - grpc.ClientStream -} - -type ingesterQueryStreamClient struct { - grpc.ClientStream -} - -func (x *ingesterQueryStreamClient) Recv() (*QueryStreamResponse, error) { - m := new(QueryStreamResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *ingesterClient) QueryExemplars(ctx context.Context, in *ExemplarQueryRequest, opts ...grpc.CallOption) (*ExemplarQueryResponse, error) { - out := new(ExemplarQueryResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/QueryExemplars", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) { - out := new(LabelValuesResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelValues", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) LabelValuesStream(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (Ingester_LabelValuesStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[1], "/cortex.Ingester/LabelValuesStream", opts...) - if err != nil { - return nil, err - } - x := &ingesterLabelValuesStreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Ingester_LabelValuesStreamClient interface { - Recv() (*LabelValuesStreamResponse, error) - grpc.ClientStream -} - -type ingesterLabelValuesStreamClient struct { - grpc.ClientStream -} - -func (x *ingesterLabelValuesStreamClient) Recv() (*LabelValuesStreamResponse, error) { - m := new(LabelValuesStreamResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *ingesterClient) LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) { - out := new(LabelNamesResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/LabelNames", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) LabelNamesStream(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (Ingester_LabelNamesStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[2], "/cortex.Ingester/LabelNamesStream", opts...) 
- if err != nil { - return nil, err - } - x := &ingesterLabelNamesStreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Ingester_LabelNamesStreamClient interface { - Recv() (*LabelNamesStreamResponse, error) - grpc.ClientStream -} - -type ingesterLabelNamesStreamClient struct { - grpc.ClientStream -} - -func (x *ingesterLabelNamesStreamClient) Recv() (*LabelNamesStreamResponse, error) { - m := new(LabelNamesStreamResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *ingesterClient) UserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UserStatsResponse, error) { - out := new(UserStatsResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/UserStats", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) AllUserStats(ctx context.Context, in *UserStatsRequest, opts ...grpc.CallOption) (*UsersStatsResponse, error) { - out := new(UsersStatsResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/AllUserStats", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) MetricsForLabelMatchers(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (*MetricsForLabelMatchersResponse, error) { - out := new(MetricsForLabelMatchersResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsForLabelMatchers", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) MetricsForLabelMatchersStream(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (Ingester_MetricsForLabelMatchersStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[3], "/cortex.Ingester/MetricsForLabelMatchersStream", opts...) - if err != nil { - return nil, err - } - x := &ingesterMetricsForLabelMatchersStreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Ingester_MetricsForLabelMatchersStreamClient interface { - Recv() (*MetricsForLabelMatchersStreamResponse, error) - grpc.ClientStream -} - -type ingesterMetricsForLabelMatchersStreamClient struct { - grpc.ClientStream -} - -func (x *ingesterMetricsForLabelMatchersStreamClient) Recv() (*MetricsForLabelMatchersStreamResponse, error) { - m := new(MetricsForLabelMatchersStreamResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *ingesterClient) MetricsMetadata(ctx context.Context, in *MetricsMetadataRequest, opts ...grpc.CallOption) (*MetricsMetadataResponse, error) { - out := new(MetricsMetadataResponse) - err := c.cc.Invoke(ctx, "/cortex.Ingester/MetricsMetadata", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ingesterClient) TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) { - stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[4], "/cortex.Ingester/TransferChunks", opts...) 
- if err != nil { - return nil, err - } - x := &ingesterTransferChunksClient{stream} - return x, nil -} - -type Ingester_TransferChunksClient interface { - Send(*TimeSeriesChunk) error - CloseAndRecv() (*TransferChunksResponse, error) - grpc.ClientStream -} - -type ingesterTransferChunksClient struct { - grpc.ClientStream -} - -func (x *ingesterTransferChunksClient) Send(m *TimeSeriesChunk) error { - return x.ClientStream.SendMsg(m) -} - -func (x *ingesterTransferChunksClient) CloseAndRecv() (*TransferChunksResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(TransferChunksResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// IngesterServer is the server API for Ingester service. -type IngesterServer interface { - Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) - Query(context.Context, *QueryRequest) (*QueryResponse, error) - QueryStream(*QueryRequest, Ingester_QueryStreamServer) error - QueryExemplars(context.Context, *ExemplarQueryRequest) (*ExemplarQueryResponse, error) - LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error) - LabelValuesStream(*LabelValuesRequest, Ingester_LabelValuesStreamServer) error - LabelNames(context.Context, *LabelNamesRequest) (*LabelNamesResponse, error) - LabelNamesStream(*LabelNamesRequest, Ingester_LabelNamesStreamServer) error - UserStats(context.Context, *UserStatsRequest) (*UserStatsResponse, error) - AllUserStats(context.Context, *UserStatsRequest) (*UsersStatsResponse, error) - MetricsForLabelMatchers(context.Context, *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) - MetricsForLabelMatchersStream(*MetricsForLabelMatchersRequest, Ingester_MetricsForLabelMatchersStreamServer) error - MetricsMetadata(context.Context, *MetricsMetadataRequest) (*MetricsMetadataResponse, error) - // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). - TransferChunks(Ingester_TransferChunksServer) error -} - -// UnimplementedIngesterServer can be embedded to have forward compatible implementations. 
-type UnimplementedIngesterServer struct { -} - -func (*UnimplementedIngesterServer) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") -} -func (*UnimplementedIngesterServer) Query(ctx context.Context, req *QueryRequest) (*QueryResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") -} -func (*UnimplementedIngesterServer) QueryStream(req *QueryRequest, srv Ingester_QueryStreamServer) error { - return status.Errorf(codes.Unimplemented, "method QueryStream not implemented") -} -func (*UnimplementedIngesterServer) QueryExemplars(ctx context.Context, req *ExemplarQueryRequest) (*ExemplarQueryResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method QueryExemplars not implemented") -} -func (*UnimplementedIngesterServer) LabelValues(ctx context.Context, req *LabelValuesRequest) (*LabelValuesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") -} -func (*UnimplementedIngesterServer) LabelValuesStream(req *LabelValuesRequest, srv Ingester_LabelValuesStreamServer) error { - return status.Errorf(codes.Unimplemented, "method LabelValuesStream not implemented") -} -func (*UnimplementedIngesterServer) LabelNames(ctx context.Context, req *LabelNamesRequest) (*LabelNamesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelNames not implemented") -} -func (*UnimplementedIngesterServer) LabelNamesStream(req *LabelNamesRequest, srv Ingester_LabelNamesStreamServer) error { - return status.Errorf(codes.Unimplemented, "method LabelNamesStream not implemented") -} -func (*UnimplementedIngesterServer) UserStats(ctx context.Context, req *UserStatsRequest) (*UserStatsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserStats not implemented") -} -func (*UnimplementedIngesterServer) AllUserStats(ctx context.Context, req *UserStatsRequest) (*UsersStatsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AllUserStats not implemented") -} -func (*UnimplementedIngesterServer) MetricsForLabelMatchers(ctx context.Context, req *MetricsForLabelMatchersRequest) (*MetricsForLabelMatchersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MetricsForLabelMatchers not implemented") -} -func (*UnimplementedIngesterServer) MetricsForLabelMatchersStream(req *MetricsForLabelMatchersRequest, srv Ingester_MetricsForLabelMatchersStreamServer) error { - return status.Errorf(codes.Unimplemented, "method MetricsForLabelMatchersStream not implemented") -} -func (*UnimplementedIngesterServer) MetricsMetadata(ctx context.Context, req *MetricsMetadataRequest) (*MetricsMetadataResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MetricsMetadata not implemented") -} -func (*UnimplementedIngesterServer) TransferChunks(srv Ingester_TransferChunksServer) error { - return status.Errorf(codes.Unimplemented, "method TransferChunks not implemented") -} - -func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { - s.RegisterService(&_Ingester_serviceDesc, srv) -} - -func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(cortexpb.WriteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).Push(ctx, in) - } - 
info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/Push", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).Push(ctx, req.(*cortexpb.WriteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).Query(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/Query", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).Query(ctx, req.(*QueryRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_QueryStream_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(QueryRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(IngesterServer).QueryStream(m, &ingesterQueryStreamServer{stream}) -} - -type Ingester_QueryStreamServer interface { - Send(*QueryStreamResponse) error - grpc.ServerStream -} - -type ingesterQueryStreamServer struct { - grpc.ServerStream -} - -func (x *ingesterQueryStreamServer) Send(m *QueryStreamResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Ingester_QueryExemplars_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExemplarQueryRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).QueryExemplars(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/QueryExemplars", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).QueryExemplars(ctx, req.(*ExemplarQueryRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LabelValuesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).LabelValues(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/LabelValues", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).LabelValues(ctx, req.(*LabelValuesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_LabelValuesStream_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(LabelValuesRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(IngesterServer).LabelValuesStream(m, &ingesterLabelValuesStreamServer{stream}) -} - -type Ingester_LabelValuesStreamServer interface { - Send(*LabelValuesStreamResponse) error - grpc.ServerStream -} - -type ingesterLabelValuesStreamServer struct { - grpc.ServerStream -} - -func (x *ingesterLabelValuesStreamServer) Send(m *LabelValuesStreamResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Ingester_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LabelNamesRequest) - if err := dec(in); 
err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).LabelNames(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/LabelNames", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).LabelNames(ctx, req.(*LabelNamesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_LabelNamesStream_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(LabelNamesRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(IngesterServer).LabelNamesStream(m, &ingesterLabelNamesStreamServer{stream}) -} - -type Ingester_LabelNamesStreamServer interface { - Send(*LabelNamesStreamResponse) error - grpc.ServerStream -} - -type ingesterLabelNamesStreamServer struct { - grpc.ServerStream -} - -func (x *ingesterLabelNamesStreamServer) Send(m *LabelNamesStreamResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Ingester_UserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UserStatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).UserStats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/UserStats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).UserStats(ctx, req.(*UserStatsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_AllUserStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UserStatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).AllUserStats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/AllUserStats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).AllUserStats(ctx, req.(*UserStatsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_MetricsForLabelMatchers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MetricsForLabelMatchersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).MetricsForLabelMatchers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/MetricsForLabelMatchers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).MetricsForLabelMatchers(ctx, req.(*MetricsForLabelMatchersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_MetricsForLabelMatchersStream_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(MetricsForLabelMatchersRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(IngesterServer).MetricsForLabelMatchersStream(m, &ingesterMetricsForLabelMatchersStreamServer{stream}) -} - -type Ingester_MetricsForLabelMatchersStreamServer interface { - Send(*MetricsForLabelMatchersStreamResponse) error - grpc.ServerStream -} - -type ingesterMetricsForLabelMatchersStreamServer struct { - grpc.ServerStream -} - -func (x 
*ingesterMetricsForLabelMatchersStreamServer) Send(m *MetricsForLabelMatchersStreamResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Ingester_MetricsMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MetricsMetadataRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IngesterServer).MetricsMetadata(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cortex.Ingester/MetricsMetadata", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).MetricsMetadata(ctx, req.(*MetricsMetadataRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Ingester_TransferChunks_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(IngesterServer).TransferChunks(&ingesterTransferChunksServer{stream}) -} - -type Ingester_TransferChunksServer interface { - SendAndClose(*TransferChunksResponse) error - Recv() (*TimeSeriesChunk, error) - grpc.ServerStream -} - -type ingesterTransferChunksServer struct { - grpc.ServerStream -} - -func (x *ingesterTransferChunksServer) SendAndClose(m *TransferChunksResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *ingesterTransferChunksServer) Recv() (*TimeSeriesChunk, error) { - m := new(TimeSeriesChunk) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Ingester_serviceDesc = grpc.ServiceDesc{ - ServiceName: "cortex.Ingester", - HandlerType: (*IngesterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Push", - Handler: _Ingester_Push_Handler, - }, - { - MethodName: "Query", - Handler: _Ingester_Query_Handler, - }, - { - MethodName: "QueryExemplars", - Handler: _Ingester_QueryExemplars_Handler, - }, - { - MethodName: "LabelValues", - Handler: _Ingester_LabelValues_Handler, - }, - { - MethodName: "LabelNames", - Handler: _Ingester_LabelNames_Handler, - }, - { - MethodName: "UserStats", - Handler: _Ingester_UserStats_Handler, - }, - { - MethodName: "AllUserStats", - Handler: _Ingester_AllUserStats_Handler, - }, - { - MethodName: "MetricsForLabelMatchers", - Handler: _Ingester_MetricsForLabelMatchers_Handler, - }, - { - MethodName: "MetricsMetadata", - Handler: _Ingester_MetricsMetadata_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "QueryStream", - Handler: _Ingester_QueryStream_Handler, - ServerStreams: true, - }, - { - StreamName: "LabelValuesStream", - Handler: _Ingester_LabelValuesStream_Handler, - ServerStreams: true, - }, - { - StreamName: "LabelNamesStream", - Handler: _Ingester_LabelNamesStream_Handler, - ServerStreams: true, - }, - { - StreamName: "MetricsForLabelMatchersStream", - Handler: _Ingester_MetricsForLabelMatchersStream_Handler, - ServerStreams: true, - }, - { - StreamName: "TransferChunks", - Handler: _Ingester_TransferChunks_Handler, - ClientStreams: true, - }, - }, - Metadata: "ingester.proto", -} - -func (m *ReadRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if 
len(m.Queries) > 0 { - for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ReadResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ExemplarQueryRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExemplarQueryRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExemplarQueryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *QueryResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func 
(m *QueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Timeseries) > 0 { - for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryStreamResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryStreamResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Timeseries) > 0 { - for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Chunkseries) > 0 { - for iNdEx := len(m.Chunkseries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunkseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ExemplarQueryResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExemplarQueryResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExemplarQueryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Timeseries) > 0 { - for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelValuesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelValuesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Matchers != nil { - { - size, err := m.Matchers.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x18 - } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if len(m.LabelName) > 0 { - i -= len(m.LabelName) - copy(dAtA[i:], m.LabelName) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelName))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m 
*LabelValuesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelValuesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelValuesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LabelValues) > 0 { - for iNdEx := len(m.LabelValues) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LabelValues[iNdEx]) - copy(dAtA[i:], m.LabelValues[iNdEx]) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelValues[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelValuesStreamResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelValuesStreamResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelValuesStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LabelValues) > 0 { - for iNdEx := len(m.LabelValues) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LabelValues[iNdEx]) - copy(dAtA[i:], m.LabelValues[iNdEx]) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelValues[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelNamesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelNamesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *LabelNamesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelNamesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelNamesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LabelNames) > 0 { - for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LabelNames[iNdEx]) - copy(dAtA[i:], m.LabelNames[iNdEx]) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelNamesStreamResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelNamesStreamResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelNamesStreamResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LabelNames) > 0 { - for iNdEx := len(m.LabelNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.LabelNames[iNdEx]) - copy(dAtA[i:], m.LabelNames[iNdEx]) - i = encodeVarintIngester(dAtA, i, uint64(len(m.LabelNames[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *UserStatsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserStatsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserStatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *UserStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserStatsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.RuleIngestionRate != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.RuleIngestionRate)))) - i-- - dAtA[i] = 0x21 - } - if m.ApiIngestionRate != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ApiIngestionRate)))) - i-- - dAtA[i] = 0x19 - } - if m.NumSeries != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.NumSeries)) - i-- - dAtA[i] = 0x10 - } - if m.IngestionRate != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.IngestionRate)))) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func (m *UserIDStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserIDStatsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserIDStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Data != nil { - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.UserId) > 0 { - i -= len(m.UserId) - copy(dAtA[i:], m.UserId) - i = encodeVarintIngester(dAtA, i, uint64(len(m.UserId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UsersStatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UsersStatsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UsersStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Stats) > 0 { - for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Stats[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MetricsForLabelMatchersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsForLabelMatchersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsForLabelMatchersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.MatchersSet) > 0 { - for iNdEx := len(m.MatchersSet) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MatchersSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MetricsForLabelMatchersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsForLabelMatchersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsForLabelMatchersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metric) > 0 { - for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MetricsForLabelMatchersStreamResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsForLabelMatchersStreamResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsForLabelMatchersStreamResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metric) > 0 { - for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MetricsMetadataRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsMetadataRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsMetadataRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - 
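For context on the deleted code above: the generated MarshalToSizedBuffer methods all follow the same gogo-protobuf pattern of computing sizes first, then writing fields back-to-front into a pre-sized buffer, each field preceded by a varint length and a one-byte tag. The following is a minimal standalone sketch of that pattern (not part of the deleted file); the names sovSketch, encodeVarint, and marshalString are illustrative stand-ins for the generated sovIngester/encodeVarintIngester helpers.

// Sketch of the back-to-front protobuf encoding used by the generated code.
package main

import (
	"fmt"
	"math/bits"
)

// sovSketch returns the number of bytes needed to varint-encode v,
// mirroring the shape of the generated sovIngester helper.
func sovSketch(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

// encodeVarint writes v as a varint ending just before offset and returns
// the new start offset, matching the shape of encodeVarintIngester.
func encodeVarint(buf []byte, offset int, v uint64) int {
	offset -= sovSketch(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

// marshalString writes a length-delimited string field (wire type 2) with
// the given tag byte, working backwards from position i in buf, the same
// way the generated code emits string fields such as UserId or LabelName.
func marshalString(buf []byte, i int, tag byte, s string) int {
	i -= len(s)
	copy(buf[i:], s)
	i = encodeVarint(buf, i, uint64(len(s)))
	i--
	buf[i] = tag
	return i
}

func main() {
	// Field 1 with wire type 2 gives tag byte 0x0a, the same constant that
	// appears for the first length-delimited field throughout the code above.
	s := "demo"
	size := 1 + len(s) + sovSketch(uint64(len(s)))
	buf := make([]byte, size)
	i := marshalString(buf, len(buf), 0x0a, s)
	fmt.Printf("% x (started at %d)\n", buf, i) // 0a 04 64 65 6d 6f (started at 0)
}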
-func (m *MetricsMetadataResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsMetadataResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsMetadataResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metadata) > 0 { - for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeriesChunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TimeSeriesChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Labels[iNdEx].Size() - i -= size - if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.UserId) > 0 { - i -= len(m.UserId) - copy(dAtA[i:], m.UserId) - i = encodeVarintIngester(dAtA, i, uint64(len(m.UserId))) - i-- - dAtA[i] = 0x12 - } - if len(m.FromIngesterId) > 0 { - i -= len(m.FromIngesterId) - copy(dAtA[i:], m.FromIngesterId) - i = encodeVarintIngester(dAtA, i, uint64(len(m.FromIngesterId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Chunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintIngester(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x22 - } - if m.Encoding != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.Encoding)) - i-- - dAtA[i] = 0x18 - } - if m.EndTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.EndTimestampMs)) - i-- - dAtA[i] = 0x10 - } - if m.StartTimestampMs != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.StartTimestampMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TransferChunksResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TransferChunksResponse) MarshalTo(dAtA []byte) (int, 
error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TransferChunksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *LabelMatchers) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelMatchers) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelMatchers) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Matchers) > 0 { - for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIngester(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelMatcher) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintIngester(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x1a - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintIngester(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintIngester(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TimeSeriesFile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeriesFile) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TimeSeriesFile) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintIngester(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x22 - } - if len(m.Filename) > 0 { - i -= len(m.Filename) - copy(dAtA[i:], m.Filename) - i = encodeVarintIngester(dAtA, i, uint64(len(m.Filename))) - i-- - dAtA[i] = 0x1a - } - if len(m.UserId) > 0 { - i -= len(m.UserId) - copy(dAtA[i:], m.UserId) - i = encodeVarintIngester(dAtA, i, uint64(len(m.UserId))) - i-- - dAtA[i] = 0x12 - } - if len(m.FromIngesterId) > 0 { - i -= len(m.FromIngesterId) - copy(dAtA[i:], m.FromIngesterId) - i = encodeVarintIngester(dAtA, i, uint64(len(m.FromIngesterId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintIngester(dAtA []byte, offset int, v uint64) int { - offset -= sovIngester(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ReadRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Queries) > 0 { - for _, e := range m.Queries { - l = e.Size() - n += 1 + l + 
sovIngester(uint64(l)) - } - } - return n -} - -func (m *ReadResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Results) > 0 { - for _, e := range m.Results { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *QueryRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) - } - if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *ExemplarQueryRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) - } - if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *QueryResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *QueryStreamResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Chunkseries) > 0 { - for _, e := range m.Chunkseries { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *ExemplarQueryResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *LabelValuesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.LabelName) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) - } - if m.Matchers != nil { - l = m.Matchers.Size() - n += 1 + l + sovIngester(uint64(l)) - } - return n -} - -func (m *LabelValuesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelValues) > 0 { - for _, s := range m.LabelValues { - l = len(s) - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *LabelValuesStreamResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelValues) > 0 { - for _, s := range m.LabelValues { - l = len(s) - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *LabelNamesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) - } - return n -} - -func (m *LabelNamesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelNames) > 0 { - for _, s := range m.LabelNames { - l = len(s) - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *LabelNamesStreamResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelNames) > 0 { - for _, s := range m.LabelNames { - l = len(s) 
- n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *UserStatsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *UserStatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IngestionRate != 0 { - n += 9 - } - if m.NumSeries != 0 { - n += 1 + sovIngester(uint64(m.NumSeries)) - } - if m.ApiIngestionRate != 0 { - n += 9 - } - if m.RuleIngestionRate != 0 { - n += 9 - } - return n -} - -func (m *UserIDStatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - if m.Data != nil { - l = m.Data.Size() - n += 1 + l + sovIngester(uint64(l)) - } - return n -} - -func (m *UsersStatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Stats) > 0 { - for _, e := range m.Stats { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *MetricsForLabelMatchersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) - } - if len(m.MatchersSet) > 0 { - for _, e := range m.MatchersSet { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *MetricsForLabelMatchersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Metric) > 0 { - for _, e := range m.Metric { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *MetricsForLabelMatchersStreamResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Metric) > 0 { - for _, e := range m.Metric { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *MetricsMetadataRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MetricsMetadataResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Metadata) > 0 { - for _, e := range m.Metadata { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *TimeSeriesChunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.FromIngesterId) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - if len(m.Chunks) > 0 { - for _, e := range m.Chunks { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *Chunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StartTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.StartTimestampMs)) - } - if m.EndTimestampMs != 0 { - n += 1 + sovIngester(uint64(m.EndTimestampMs)) - } - if m.Encoding != 0 { - n += 1 + sovIngester(uint64(m.Encoding)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - return n -} - -func (m *TransferChunksResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *LabelMatchers) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Matchers) > 0 { - for _, e := range m.Matchers { - l = e.Size() - n += 1 + l + sovIngester(uint64(l)) - } - } - return n -} - -func (m *LabelMatcher) 
Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovIngester(uint64(m.Type)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - return n -} - -func (m *TimeSeriesFile) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.FromIngesterId) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - l = len(m.UserId) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - l = len(m.Filename) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovIngester(uint64(l)) - } - return n -} - -func sovIngester(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozIngester(x uint64) (n int) { - return sovIngester(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ReadRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForQueries := "[]*QueryRequest{" - for _, f := range this.Queries { - repeatedStringForQueries += strings.Replace(f.String(), "QueryRequest", "QueryRequest", 1) + "," - } - repeatedStringForQueries += "}" - s := strings.Join([]string{`&ReadRequest{`, - `Queries:` + repeatedStringForQueries + `,`, - `}`, - }, "") - return s -} -func (this *ReadResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForResults := "[]*QueryResponse{" - for _, f := range this.Results { - repeatedStringForResults += strings.Replace(f.String(), "QueryResponse", "QueryResponse", 1) + "," - } - repeatedStringForResults += "}" - s := strings.Join([]string{`&ReadResponse{`, - `Results:` + repeatedStringForResults + `,`, - `}`, - }, "") - return s -} -func (this *QueryRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForMatchers := "[]*LabelMatcher{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," - } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&QueryRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + repeatedStringForMatchers + `,`, - `}`, - }, "") - return s -} -func (this *ExemplarQueryRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForMatchers := "[]*LabelMatchers{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + "," - } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&ExemplarQueryRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + repeatedStringForMatchers + `,`, - `}`, - }, "") - return s -} -func (this *QueryResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForTimeseries := "[]TimeSeries{" - for _, f := range this.Timeseries { - repeatedStringForTimeseries += fmt.Sprintf("%v", f) + "," - } - repeatedStringForTimeseries += "}" - s := strings.Join([]string{`&QueryResponse{`, - `Timeseries:` + repeatedStringForTimeseries + `,`, - `}`, - }, "") - return s -} -func (this *QueryStreamResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForChunkseries := "[]TimeSeriesChunk{" - for _, f := range this.Chunkseries { - repeatedStringForChunkseries += 
strings.Replace(strings.Replace(f.String(), "TimeSeriesChunk", "TimeSeriesChunk", 1), `&`, ``, 1) + "," - } - repeatedStringForChunkseries += "}" - repeatedStringForTimeseries := "[]TimeSeries{" - for _, f := range this.Timeseries { - repeatedStringForTimeseries += fmt.Sprintf("%v", f) + "," - } - repeatedStringForTimeseries += "}" - s := strings.Join([]string{`&QueryStreamResponse{`, - `Chunkseries:` + repeatedStringForChunkseries + `,`, - `Timeseries:` + repeatedStringForTimeseries + `,`, - `}`, - }, "") - return s -} -func (this *ExemplarQueryResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForTimeseries := "[]TimeSeries{" - for _, f := range this.Timeseries { - repeatedStringForTimeseries += fmt.Sprintf("%v", f) + "," - } - repeatedStringForTimeseries += "}" - s := strings.Join([]string{`&ExemplarQueryResponse{`, - `Timeseries:` + repeatedStringForTimeseries + `,`, - `}`, - }, "") - return s -} -func (this *LabelValuesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelValuesRequest{`, - `LabelName:` + fmt.Sprintf("%v", this.LabelName) + `,`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Matchers:` + strings.Replace(this.Matchers.String(), "LabelMatchers", "LabelMatchers", 1) + `,`, - `}`, - }, "") - return s -} -func (this *LabelValuesResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelValuesResponse{`, - `LabelValues:` + fmt.Sprintf("%v", this.LabelValues) + `,`, - `}`, - }, "") - return s -} -func (this *LabelValuesStreamResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelValuesStreamResponse{`, - `LabelValues:` + fmt.Sprintf("%v", this.LabelValues) + `,`, - `}`, - }, "") - return s -} -func (this *LabelNamesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelNamesRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `}`, - }, "") - return s -} -func (this *LabelNamesResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelNamesResponse{`, - `LabelNames:` + fmt.Sprintf("%v", this.LabelNames) + `,`, - `}`, - }, "") - return s -} -func (this *LabelNamesStreamResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelNamesStreamResponse{`, - `LabelNames:` + fmt.Sprintf("%v", this.LabelNames) + `,`, - `}`, - }, "") - return s -} -func (this *UserStatsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UserStatsRequest{`, - `}`, - }, "") - return s -} -func (this *UserStatsResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UserStatsResponse{`, - `IngestionRate:` + fmt.Sprintf("%v", this.IngestionRate) + `,`, - `NumSeries:` + fmt.Sprintf("%v", this.NumSeries) + `,`, - `ApiIngestionRate:` + fmt.Sprintf("%v", this.ApiIngestionRate) + `,`, - `RuleIngestionRate:` + fmt.Sprintf("%v", this.RuleIngestionRate) + `,`, - `}`, - }, "") - return s -} -func (this *UserIDStatsResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UserIDStatsResponse{`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Data:` + strings.Replace(this.Data.String(), 
"UserStatsResponse", "UserStatsResponse", 1) + `,`, - `}`, - }, "") - return s -} -func (this *UsersStatsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForStats := "[]*UserIDStatsResponse{" - for _, f := range this.Stats { - repeatedStringForStats += strings.Replace(f.String(), "UserIDStatsResponse", "UserIDStatsResponse", 1) + "," - } - repeatedStringForStats += "}" - s := strings.Join([]string{`&UsersStatsResponse{`, - `Stats:` + repeatedStringForStats + `,`, - `}`, - }, "") - return s -} -func (this *MetricsForLabelMatchersRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForMatchersSet := "[]*LabelMatchers{" - for _, f := range this.MatchersSet { - repeatedStringForMatchersSet += strings.Replace(f.String(), "LabelMatchers", "LabelMatchers", 1) + "," - } - repeatedStringForMatchersSet += "}" - s := strings.Join([]string{`&MetricsForLabelMatchersRequest{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - `EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `MatchersSet:` + repeatedStringForMatchersSet + `,`, - `}`, - }, "") - return s -} -func (this *MetricsForLabelMatchersResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMetric := "[]*Metric{" - for _, f := range this.Metric { - repeatedStringForMetric += strings.Replace(fmt.Sprintf("%v", f), "Metric", "cortexpb.Metric", 1) + "," - } - repeatedStringForMetric += "}" - s := strings.Join([]string{`&MetricsForLabelMatchersResponse{`, - `Metric:` + repeatedStringForMetric + `,`, - `}`, - }, "") - return s -} -func (this *MetricsForLabelMatchersStreamResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMetric := "[]*Metric{" - for _, f := range this.Metric { - repeatedStringForMetric += strings.Replace(fmt.Sprintf("%v", f), "Metric", "cortexpb.Metric", 1) + "," - } - repeatedStringForMetric += "}" - s := strings.Join([]string{`&MetricsForLabelMatchersStreamResponse{`, - `Metric:` + repeatedStringForMetric + `,`, - `}`, - }, "") - return s -} -func (this *MetricsMetadataRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricsMetadataRequest{`, - `}`, - }, "") - return s -} -func (this *MetricsMetadataResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMetadata := "[]*MetricMetadata{" - for _, f := range this.Metadata { - repeatedStringForMetadata += strings.Replace(fmt.Sprintf("%v", f), "MetricMetadata", "cortexpb.MetricMetadata", 1) + "," - } - repeatedStringForMetadata += "}" - s := strings.Join([]string{`&MetricsMetadataResponse{`, - `Metadata:` + repeatedStringForMetadata + `,`, - `}`, - }, "") - return s -} -func (this *TimeSeriesChunk) String() string { - if this == nil { - return "nil" - } - repeatedStringForChunks := "[]Chunk{" - for _, f := range this.Chunks { - repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "Chunk", "Chunk", 1), `&`, ``, 1) + "," - } - repeatedStringForChunks += "}" - s := strings.Join([]string{`&TimeSeriesChunk{`, - `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, - `Chunks:` + repeatedStringForChunks + `,`, - `}`, - }, "") - return s -} -func (this *Chunk) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Chunk{`, - `StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`, - 
`EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`, - `Encoding:` + fmt.Sprintf("%v", this.Encoding) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *TransferChunksResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TransferChunksResponse{`, - `}`, - }, "") - return s -} -func (this *LabelMatchers) String() string { - if this == nil { - return "nil" - } - repeatedStringForMatchers := "[]*LabelMatcher{" - for _, f := range this.Matchers { - repeatedStringForMatchers += strings.Replace(f.String(), "LabelMatcher", "LabelMatcher", 1) + "," - } - repeatedStringForMatchers += "}" - s := strings.Join([]string{`&LabelMatchers{`, - `Matchers:` + repeatedStringForMatchers + `,`, - `}`, - }, "") - return s -} -func (this *LabelMatcher) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LabelMatcher{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *TimeSeriesFile) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TimeSeriesFile{`, - `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, - `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, - `Filename:` + fmt.Sprintf("%v", this.Filename) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func valueToStringIngester(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ReadRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Queries = append(m.Queries, &QueryRequest{}) - if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadResponse) Unmarshal(dAtA []byte) 
error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, &QueryResponse{}) - if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExemplarQueryRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExemplarQueryRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExemplarQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Matchers = append(m.Matchers, &LabelMatchers{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - 
preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, cortexpb.TimeSeries{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryStreamResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryStreamResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunkseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunkseries = append(m.Chunkseries, TimeSeriesChunk{}) - if err := m.Chunkseries[len(m.Chunkseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } 
- } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, cortexpb.TimeSeries{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExemplarQueryResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExemplarQueryResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExemplarQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, cortexpb.TimeSeries{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelValuesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Matchers == nil { - m.Matchers = &LabelMatchers{} - } - if err := m.Matchers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelValuesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelValues = append(m.LabelValues, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelValuesStreamResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelValuesStreamResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelValuesStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelValues = append(m.LabelValues, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelNamesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b 
< 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelNamesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelNamesStreamResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelNamesStreamResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelNamesStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelNames = append(m.LabelNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserStatsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserStatsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserStatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserStatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field IngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.IngestionRate = float64(math.Float64frombits(v)) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumSeries", wireType) - } - m.NumSeries = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumSeries |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiIngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.ApiIngestionRate = float64(math.Float64frombits(v)) - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field RuleIngestionRate", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.RuleIngestionRate = float64(math.Float64frombits(v)) - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserIDStatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserIDStatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserIDStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Data == nil { - m.Data = &UserStatsResponse{} - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UsersStatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UsersStatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UsersStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stats = append(m.Stats, &UserIDStatsResponse{}) - if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsForLabelMatchersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsForLabelMatchersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsForLabelMatchersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchersSet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchersSet = append(m.MatchersSet, &LabelMatchers{}) - if err := m.MatchersSet[len(m.MatchersSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsForLabelMatchersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsForLabelMatchersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsForLabelMatchersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metric = append(m.Metric, &cortexpb.Metric{}) - if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsForLabelMatchersStreamResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsForLabelMatchersStreamResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsForLabelMatchersStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metric = append(m.Metric, &cortexpb.Metric{}) - if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsMetadataRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsMetadataRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsMetadataRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsMetadataResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsMetadataResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsMetadataResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metadata = append(m.Metadata, &cortexpb.MetricMetadata{}) - if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TimeSeriesChunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeriesChunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FromIngesterId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_cortexpb.LabelAdapter{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Chunks = append(m.Chunks, Chunk{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Chunk) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Chunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) - } - m.StartTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) - } - m.EndTimestampMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndTimestampMs |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Encoding", wireType) - } - m.Encoding = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Encoding |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TransferChunksResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TransferChunksResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelMatchers) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelMatchers: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelMatchers: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Matchers = append(m.Matchers, &LabelMatcher{}) - if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelMatcher) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= MatchType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TimeSeriesFile) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TimeSeriesFile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeriesFile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FromIngesterId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filename = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIngester - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIngester - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIngester - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIngester(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIngester - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipIngester(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIngester - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIngester - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIngester - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthIngester - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthIngester - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIngester - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipIngester(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthIngester - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthIngester = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowIngester = fmt.Errorf("proto: integer overflow") -) diff --git a/internal/cortex/ingester/client/ingester.proto b/internal/cortex/ingester/client/ingester.proto deleted file mode 100644 index 4534e5cdef..0000000000 --- a/internal/cortex/ingester/client/ingester.proto +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
- -syntax = "proto3"; - -// TODO: Rename to ingesterpb -package cortex; - -option go_package = "client"; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/thanos-io/thanos/internal/cortex/cortexpb/cortex.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -service Ingester { - rpc Push(cortexpb.WriteRequest) returns (cortexpb.WriteResponse) {}; - rpc Query(QueryRequest) returns (QueryResponse) {}; - rpc QueryStream(QueryRequest) returns (stream QueryStreamResponse) {}; - rpc QueryExemplars(ExemplarQueryRequest) returns (ExemplarQueryResponse) {}; - - rpc LabelValues(LabelValuesRequest) returns (LabelValuesResponse) {}; - rpc LabelValuesStream(LabelValuesRequest) returns (stream LabelValuesStreamResponse) {}; - rpc LabelNames(LabelNamesRequest) returns (LabelNamesResponse) {}; - rpc LabelNamesStream(LabelNamesRequest) returns (stream LabelNamesStreamResponse) {}; - rpc UserStats(UserStatsRequest) returns (UserStatsResponse) {}; - rpc AllUserStats(UserStatsRequest) returns (UsersStatsResponse) {}; - rpc MetricsForLabelMatchers(MetricsForLabelMatchersRequest) returns (MetricsForLabelMatchersResponse) {}; - rpc MetricsForLabelMatchersStream(MetricsForLabelMatchersRequest) returns (stream MetricsForLabelMatchersStreamResponse) {}; - rpc MetricsMetadata(MetricsMetadataRequest) returns (MetricsMetadataResponse) {}; - - // TransferChunks allows leaving ingester (client) to stream chunks directly to joining ingesters (server). - rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {}; -} - -message ReadRequest { - repeated QueryRequest queries = 1; -} - -message ReadResponse { - repeated QueryResponse results = 1; -} - -message QueryRequest { - int64 start_timestamp_ms = 1; - int64 end_timestamp_ms = 2; - repeated LabelMatcher matchers = 3; -} - -message ExemplarQueryRequest { - int64 start_timestamp_ms = 1; - int64 end_timestamp_ms = 2; - repeated LabelMatchers matchers = 3; -} - -message QueryResponse { - repeated cortexpb.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; -} - -// QueryStreamResponse contains a batch of timeseries chunks or timeseries. Only one of these series will be populated. 
-message QueryStreamResponse { - repeated TimeSeriesChunk chunkseries = 1 [(gogoproto.nullable) = false]; - repeated cortexpb.TimeSeries timeseries = 2 [(gogoproto.nullable) = false]; -} - -message ExemplarQueryResponse { - repeated cortexpb.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; -} - -message LabelValuesRequest { - string label_name = 1; - int64 start_timestamp_ms = 2; - int64 end_timestamp_ms = 3; - LabelMatchers matchers = 4; -} - -message LabelValuesResponse { - repeated string label_values = 1; -} - -message LabelValuesStreamResponse { - repeated string label_values = 1; -} - -message LabelNamesRequest { - int64 start_timestamp_ms = 1; - int64 end_timestamp_ms = 2; -} - -message LabelNamesResponse { - repeated string label_names = 1; -} - -message LabelNamesStreamResponse { - repeated string label_names = 1; -} - -message UserStatsRequest {} - -message UserStatsResponse { - double ingestion_rate = 1; - uint64 num_series = 2; - double api_ingestion_rate = 3; - double rule_ingestion_rate = 4; -} - -message UserIDStatsResponse { - string user_id = 1; - UserStatsResponse data = 2; -} - -message UsersStatsResponse { - repeated UserIDStatsResponse stats = 1; -} - -message MetricsForLabelMatchersRequest { - int64 start_timestamp_ms = 1; - int64 end_timestamp_ms = 2; - repeated LabelMatchers matchers_set = 3; -} - -message MetricsForLabelMatchersResponse { - repeated cortexpb.Metric metric = 1; -} - -message MetricsForLabelMatchersStreamResponse { - repeated cortexpb.Metric metric = 1; -} - -message MetricsMetadataRequest { -} - -message MetricsMetadataResponse { - repeated cortexpb.MetricMetadata metadata = 1; -} - -message TimeSeriesChunk { - string from_ingester_id = 1; - string user_id = 2; - repeated cortexpb.LabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/thanos-io/thanos/internal/cortex/cortexpb.LabelAdapter"]; - repeated Chunk chunks = 4 [(gogoproto.nullable) = false]; -} - -message Chunk { - int64 start_timestamp_ms = 1; - int64 end_timestamp_ms = 2; - int32 encoding = 3; - bytes data = 4; -} - -message TransferChunksResponse { -} - -message LabelMatchers { - repeated LabelMatcher matchers = 1; -} - -enum MatchType { - EQUAL = 0; - NOT_EQUAL = 1; - REGEX_MATCH = 2; - REGEX_NO_MATCH = 3; -} - -message LabelMatcher { - MatchType type = 1; - string name = 2; - string value = 3; -} - -message TimeSeriesFile { - string from_ingester_id = 1; - string user_id = 2; - string filename = 3; - bytes data = 4; -} diff --git a/internal/cortex/prom1/storage/metric/metric.go b/internal/cortex/prom1/storage/metric/metric.go deleted file mode 100644 index df4e2b9cfc..0000000000 --- a/internal/cortex/prom1/storage/metric/metric.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -// This file was taken from Prometheus (https://github.com/prometheus/prometheus). -// The original license header is included below: -// -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
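Aside: the QueryStreamResponse message deleted above documents that only one of its two repeated fields is expected to be populated per response. The snippet below is an editor's sketch (not part of the diff) of how a consumer dispatched on that, using the Go types generated from this proto as they existed before the removal; the field names (Chunkseries, Timeseries, Labels, Chunks) match the removed generated code, the print format is arbitrary, and because these are internal packages the snippet only builds from inside the pre-removal Thanos module tree.

package main

import (
	"fmt"

	"github.com/thanos-io/thanos/internal/cortex/cortexpb"
	"github.com/thanos-io/thanos/internal/cortex/ingester/client"
)

// handleQueryStream inspects one streamed response and handles whichever
// of the two series representations is present.
func handleQueryStream(resp *client.QueryStreamResponse) {
	if len(resp.Chunkseries) > 0 {
		for _, cs := range resp.Chunkseries {
			fmt.Printf("chunked series: %d labels, %d chunks\n", len(cs.Labels), len(cs.Chunks))
		}
		return
	}
	for _, ts := range resp.Timeseries {
		fmt.Printf("raw series: %d labels\n", len(ts.Labels))
	}
}

func main() {
	handleQueryStream(&client.QueryStreamResponse{
		Timeseries: []cortexpb.TimeSeries{{}},
	})
}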
-// See the License for the specific language governing permissions and -// limitations under the License. - -package metric - -import "github.com/prometheus/common/model" - -// Metric wraps a model.Metric and copies it upon modification if Copied is false. -type Metric struct { - Copied bool - Metric model.Metric -} - -// Set sets a label name in the wrapped Metric to a given value and copies the -// Metric initially, if it is not already a copy. -func (m *Metric) Set(ln model.LabelName, lv model.LabelValue) { - m.Copy() - m.Metric[ln] = lv -} - -// Del deletes a given label name from the wrapped Metric and copies the -// Metric initially, if it is not already a copy. -func (m *Metric) Del(ln model.LabelName) { - m.Copy() - delete(m.Metric, ln) -} - -// Get the value for the given label name. An empty value is returned -// if the label does not exist in the metric. -func (m *Metric) Get(ln model.LabelName) model.LabelValue { - return m.Metric[ln] -} - -// Gets behaves as Get but the returned boolean is false iff the label -// does not exist. -func (m *Metric) Gets(ln model.LabelName) (model.LabelValue, bool) { - lv, ok := m.Metric[ln] - return lv, ok -} - -// Copy the underlying Metric if it is not already a copy. -func (m *Metric) Copy() *Metric { - if !m.Copied { - m.Metric = m.Metric.Clone() - m.Copied = true - } - return m -} - -// String implements fmt.Stringer. -func (m Metric) String() string { - return m.Metric.String() -} diff --git a/internal/cortex/prom1/storage/metric/metric_test.go b/internal/cortex/prom1/storage/metric/metric_test.go deleted file mode 100644 index f25f23ec04..0000000000 --- a/internal/cortex/prom1/storage/metric/metric_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -// This file was taken from Prometheus (https://github.com/prometheus/prometheus). -// The original license header is included below: -// -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric - -import ( - "testing" - - "github.com/prometheus/common/model" -) - -func TestMetric(t *testing.T) { - testMetric := model.Metric{ - "to_delete": "test1", - "to_change": "test2", - } - - scenarios := []struct { - fn func(*Metric) - out model.Metric - }{ - { - fn: func(cm *Metric) { - cm.Del("to_delete") - }, - out: model.Metric{ - "to_change": "test2", - }, - }, - { - fn: func(cm *Metric) { - cm.Set("to_change", "changed") - }, - out: model.Metric{ - "to_delete": "test1", - "to_change": "changed", - }, - }, - } - - for i, s := range scenarios { - orig := testMetric.Clone() - cm := &Metric{ - Metric: orig, - Copied: false, - } - - s.fn(cm) - - // Test that the original metric was not modified. - if !orig.Equal(testMetric) { - t.Fatalf("%d. original metric changed; expected %v, got %v", i, testMetric, orig) - } - - // Test that the new metric has the right changes. - if !cm.Metric.Equal(s.out) { - t.Fatalf("%d. 
copied metric doesn't contain expected changes; expected %v, got %v", i, s.out, cm.Metric) - } - } -} diff --git a/internal/cortex/prom1/storage/metric/sample.go b/internal/cortex/prom1/storage/metric/sample.go deleted file mode 100644 index d5eb51ffe2..0000000000 --- a/internal/cortex/prom1/storage/metric/sample.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -// This file was taken from Prometheus (https://github.com/prometheus/prometheus). -// The original license header is included below: -// -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric - -import "github.com/prometheus/common/model" - -// Interval describes the inclusive interval between two Timestamps. -type Interval struct { - OldestInclusive model.Time - NewestInclusive model.Time -} diff --git a/internal/cortex/querier/astmapper/astmapper.go b/internal/cortex/querier/astmapper/astmapper.go deleted file mode 100644 index f63edbee4e..0000000000 --- a/internal/cortex/querier/astmapper/astmapper.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package astmapper - -import ( - "github.com/pkg/errors" - "github.com/prometheus/prometheus/promql/parser" -) - -// ASTMapper is the exported interface for mapping between multiple AST representations -type ASTMapper interface { - Map(node parser.Node) (parser.Node, error) -} - -// MapperFunc is a function adapter for ASTMapper -type MapperFunc func(node parser.Node) (parser.Node, error) - -// Map applies a mapperfunc as an ASTMapper -func (fn MapperFunc) Map(node parser.Node) (parser.Node, error) { - return fn(node) -} - -// MultiMapper can compose multiple ASTMappers -type MultiMapper struct { - mappers []ASTMapper -} - -// Map implements ASTMapper -func (m *MultiMapper) Map(node parser.Node) (parser.Node, error) { - var result parser.Node = node - var err error - - if len(m.mappers) == 0 { - return nil, errors.New("MultiMapper: No mappers registered") - } - - for _, x := range m.mappers { - result, err = x.Map(result) - if err != nil { - return nil, err - } - } - return result, nil - -} - -// Register adds ASTMappers into a multimapper. -// Since registered functions are applied in the order they're registered, it's advised to register them -// in decreasing priority and only operate on nodes that each function cares about, defaulting to CloneNode. -func (m *MultiMapper) Register(xs ...ASTMapper) { - m.mappers = append(m.mappers, xs...) -} - -// NewMultiMapper instaniates an ASTMapper from multiple ASTMappers -func NewMultiMapper(xs ...ASTMapper) *MultiMapper { - m := &MultiMapper{} - m.Register(xs...) - return m -} - -// CloneNode is a helper function to clone a node. -func CloneNode(node parser.Node) (parser.Node, error) { - return parser.ParseExpr(node.String()) -} - -// NodeMapper either maps a single AST node or returns the unaltered node. 
-// It also returns a bool to signal that no further recursion is necessary. -// This is helpful because it allows mappers to only implement logic for node types they want to change. -// It makes some mappers trivially easy to implement -type NodeMapper interface { - MapNode(node parser.Node) (mapped parser.Node, finished bool, err error) -} - -// NewASTNodeMapper creates an ASTMapper from a NodeMapper -func NewASTNodeMapper(mapper NodeMapper) ASTNodeMapper { - return ASTNodeMapper{mapper} -} - -// ASTNodeMapper is an ASTMapper adapter which uses a NodeMapper internally. -type ASTNodeMapper struct { - NodeMapper -} - -// Map implements ASTMapper from a NodeMapper -func (nm ASTNodeMapper) Map(node parser.Node) (parser.Node, error) { - node, fin, err := nm.MapNode(node) - - if err != nil { - return nil, err - } - - if fin { - return node, nil - } - - switch n := node.(type) { - case nil: - // nil handles cases where we check optional fields that are not set - return nil, nil - - case parser.Expressions: - for i, e := range n { - mapped, err := nm.Map(e) - if err != nil { - return nil, err - } - n[i] = mapped.(parser.Expr) - } - return n, nil - - case *parser.AggregateExpr: - expr, err := nm.Map(n.Expr) - if err != nil { - return nil, err - } - n.Expr = expr.(parser.Expr) - return n, nil - - case *parser.BinaryExpr: - lhs, err := nm.Map(n.LHS) - if err != nil { - return nil, err - } - n.LHS = lhs.(parser.Expr) - - rhs, err := nm.Map(n.RHS) - if err != nil { - return nil, err - } - n.RHS = rhs.(parser.Expr) - return n, nil - - case *parser.Call: - for i, e := range n.Args { - mapped, err := nm.Map(e) - if err != nil { - return nil, err - } - n.Args[i] = mapped.(parser.Expr) - } - return n, nil - - case *parser.SubqueryExpr: - mapped, err := nm.Map(n.Expr) - if err != nil { - return nil, err - } - n.Expr = mapped.(parser.Expr) - return n, nil - - case *parser.ParenExpr: - mapped, err := nm.Map(n.Expr) - if err != nil { - return nil, err - } - n.Expr = mapped.(parser.Expr) - return n, nil - - case *parser.UnaryExpr: - mapped, err := nm.Map(n.Expr) - if err != nil { - return nil, err - } - n.Expr = mapped.(parser.Expr) - return n, nil - - case *parser.EvalStmt: - mapped, err := nm.Map(n.Expr) - if err != nil { - return nil, err - } - n.Expr = mapped.(parser.Expr) - return n, nil - - case *parser.NumberLiteral, *parser.StringLiteral, *parser.VectorSelector, *parser.MatrixSelector: - return n, nil - - default: - panic(errors.Errorf("nodeMapper: unhandled node type %T", node)) - } -} diff --git a/internal/cortex/querier/astmapper/astmapper_test.go b/internal/cortex/querier/astmapper/astmapper_test.go deleted file mode 100644 index 57b871589d..0000000000 --- a/internal/cortex/querier/astmapper/astmapper_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
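
For readers skimming this removal, the core of the astmapper package deleted above is the split between ASTMapper (whole-tree mapping, composable via MultiMapper) and NodeMapper (per-node mapping wrapped by NewASTNodeMapper, which owns the recursion). The sketch below is illustrative only: it assumes the deleted package were still importable from outside internal/, and the addEnvMatcher mapper and example query are invented for the demonstration.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql/parser"

	"github.com/thanos-io/thanos/internal/cortex/querier/astmapper"
)

// addEnvMatcher is a hypothetical NodeMapper: it appends a static matcher to
// every vector selector and reports finished=true so the walker stops there,
// much like shardVectorSelector injects the shard matcher.
type addEnvMatcher struct{}

func (addEnvMatcher) MapNode(node parser.Node) (parser.Node, bool, error) {
	if vs, ok := node.(*parser.VectorSelector); ok {
		m, err := labels.NewMatcher(labels.MatchEqual, "env", "demo")
		if err != nil {
			return nil, false, err
		}
		vs.LabelMatchers = append(vs.LabelMatchers, m)
		return vs, true, nil
	}
	return node, false, nil // not handled here; keep walking children
}

func main() {
	expr, err := parser.ParseExpr(`sum(http_requests_total{job="api"})`)
	if err != nil {
		panic(err)
	}

	// MultiMapper applies registered ASTMappers in order; NewASTNodeMapper
	// adapts the per-node mapper into a full-tree ASTMapper.
	mapper := astmapper.NewMultiMapper(astmapper.NewASTNodeMapper(addEnvMatcher{}))

	mapped, err := mapper.Map(expr)
	if err != nil {
		panic(err)
	}
	fmt.Println(mapped.String()) // selector now carries env="demo"
}
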
- -package astmapper - -import ( - "fmt" - "testing" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" - "github.com/stretchr/testify/require" -) - -func TestCloneNode(t *testing.T) { - var testExpr = []struct { - input parser.Expr - expected parser.Expr - }{ - // simple unmodified case - { - &parser.BinaryExpr{ - Op: parser.ADD, - LHS: &parser.NumberLiteral{Val: 1}, - RHS: &parser.NumberLiteral{Val: 1}, - }, - &parser.BinaryExpr{ - Op: parser.ADD, - LHS: &parser.NumberLiteral{Val: 1, PosRange: parser.PositionRange{Start: 0, End: 1}}, - RHS: &parser.NumberLiteral{Val: 1, PosRange: parser.PositionRange{Start: 4, End: 5}}, - }, - }, - { - &parser.AggregateExpr{ - Op: parser.SUM, - Without: true, - Expr: &parser.VectorSelector{ - Name: "some_metric", - LabelMatchers: []*labels.Matcher{ - mustLabelMatcher(labels.MatchEqual, string(model.MetricNameLabel), "some_metric"), - }, - }, - Grouping: []string{"foo"}, - }, - &parser.AggregateExpr{ - Op: parser.SUM, - Without: true, - Expr: &parser.VectorSelector{ - Name: "some_metric", - LabelMatchers: []*labels.Matcher{ - mustLabelMatcher(labels.MatchEqual, string(model.MetricNameLabel), "some_metric"), - }, - PosRange: parser.PositionRange{ - Start: 19, - End: 30, - }, - }, - Grouping: []string{"foo"}, - PosRange: parser.PositionRange{ - Start: 0, - End: 31, - }, - }, - }, - } - - for i, c := range testExpr { - t.Run(fmt.Sprintf("[%d]", i), func(t *testing.T) { - res, err := CloneNode(c.input) - require.NoError(t, err) - require.Equal(t, c.expected, res) - }) - } -} - -func TestCloneNode_String(t *testing.T) { - var testExpr = []struct { - input string - expected string - }{ - { - input: `rate(http_requests_total{cluster="us-central1"}[1m])`, - expected: `rate(http_requests_total{cluster="us-central1"}[1m])`, - }, - { - input: `sum( -sum(rate(http_requests_total{cluster="us-central1"}[1m])) -/ -sum(rate(http_requests_total{cluster="ops-tools1"}[1m])) -)`, - expected: `sum(sum(rate(http_requests_total{cluster="us-central1"}[1m])) / sum(rate(http_requests_total{cluster="ops-tools1"}[1m])))`, - }, - } - - for i, c := range testExpr { - t.Run(fmt.Sprintf("[%d]", i), func(t *testing.T) { - expr, err := parser.ParseExpr(c.input) - require.Nil(t, err) - res, err := CloneNode(expr) - require.Nil(t, err) - require.Equal(t, c.expected, res.String()) - }) - } -} - -func mustLabelMatcher(mt labels.MatchType, name, val string) *labels.Matcher { - m, err := labels.NewMatcher(mt, name, val) - if err != nil { - panic(err) - } - return m -} diff --git a/internal/cortex/querier/astmapper/embedded.go b/internal/cortex/querier/astmapper/embedded.go deleted file mode 100644 index bade1405ce..0000000000 --- a/internal/cortex/querier/astmapper/embedded.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package astmapper - -import ( - "encoding/json" - - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" -) - -/* -Design: - -The prometheus api package enforces a (*promql.Engine argument), making it infeasible to do lazy AST -evaluation and substitution from within this package. -This leaves the (storage.Queryable) interface as the remaining target for conducting application level sharding. - -The main idea is to analyze the AST and determine which subtrees can be parallelized. 
With those in hand, the queries may -be remapped into vector or matrix selectors utilizing a reserved label containing the original query. These may then be parallelized in the storage implementation. -*/ - -const ( - // QueryLabel is a reserved label containing an embedded query - QueryLabel = "__cortex_queries__" - // EmbeddedQueriesMetricName is a reserved label (metric name) denoting an embedded query - EmbeddedQueriesMetricName = "__embedded_queries__" -) - -// EmbeddedQueries is a wrapper type for encoding queries -type EmbeddedQueries struct { - Concat []string `json:"Concat"` -} - -// JSONCodec is a Codec that uses JSON representations of EmbeddedQueries structs -var JSONCodec jsonCodec - -type jsonCodec struct{} - -func (c jsonCodec) Encode(queries []string) (string, error) { - embedded := EmbeddedQueries{ - Concat: queries, - } - b, err := json.Marshal(embedded) - return string(b), err -} - -func (c jsonCodec) Decode(encoded string) (queries []string, err error) { - var embedded EmbeddedQueries - err = json.Unmarshal([]byte(encoded), &embedded) - if err != nil { - return nil, err - } - - return embedded.Concat, nil -} - -// VectorSquash reduces an AST into a single vector query which can be hijacked by a Queryable impl. -// It always uses a VectorSelector as the substitution node. -// This is important because logical/set binops can only be applied against vectors and not matrices. -func VectorSquasher(nodes ...parser.Node) (parser.Expr, error) { - - // concat OR legs - strs := make([]string, 0, len(nodes)) - for _, node := range nodes { - strs = append(strs, node.String()) - } - - encoded, err := JSONCodec.Encode(strs) - if err != nil { - return nil, err - } - - embeddedQuery, err := labels.NewMatcher(labels.MatchEqual, QueryLabel, encoded) - if err != nil { - return nil, err - } - - return &parser.VectorSelector{ - Name: EmbeddedQueriesMetricName, - LabelMatchers: []*labels.Matcher{embeddedQuery}, - }, nil - -} diff --git a/internal/cortex/querier/astmapper/parallel.go b/internal/cortex/querier/astmapper/parallel.go deleted file mode 100644 index bf3801e81c..0000000000 --- a/internal/cortex/querier/astmapper/parallel.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package astmapper - -import ( - "fmt" - - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/promql/parser" - - util_log "github.com/thanos-io/thanos/internal/cortex/util/log" -) - -var summableAggregates = map[parser.ItemType]struct{}{ - parser.SUM: {}, - parser.MIN: {}, - parser.MAX: {}, - parser.TOPK: {}, - parser.BOTTOMK: {}, - parser.COUNT: {}, -} - -var nonParallelFuncs = []string{ - "histogram_quantile", - "quantile_over_time", - "absent", -} - -// CanParallelize tests if a subtree is parallelizable. -// A subtree is parallelizable if all of its components are parallelizable. 
-func CanParallelize(node parser.Node) bool { - switch n := node.(type) { - case nil: - // nil handles cases where we check optional fields that are not set - return true - - case parser.Expressions: - for _, e := range n { - if !CanParallelize(e) { - return false - } - } - return true - - case *parser.AggregateExpr: - _, ok := summableAggregates[n.Op] - if !ok { - return false - } - - // Ensure there are no nested aggregations - nestedAggs, err := Predicate(n.Expr, func(node parser.Node) (bool, error) { - _, ok := node.(*parser.AggregateExpr) - return ok, nil - }) - - return err == nil && !nestedAggs && CanParallelize(n.Expr) - - case *parser.BinaryExpr: - // since binary exprs use each side for merging, they cannot be parallelized - return false - - case *parser.Call: - if n.Func == nil { - return false - } - if !ParallelizableFunc(*n.Func) { - return false - } - - for _, e := range n.Args { - if !CanParallelize(e) { - return false - } - } - return true - - case *parser.SubqueryExpr: - return CanParallelize(n.Expr) - - case *parser.ParenExpr: - return CanParallelize(n.Expr) - - case *parser.UnaryExpr: - // Since these are only currently supported for Scalars, should be parallel-compatible - return true - - case *parser.EvalStmt: - return CanParallelize(n.Expr) - - case *parser.MatrixSelector, *parser.NumberLiteral, *parser.StringLiteral, *parser.VectorSelector: - return true - - default: - level.Error(util_log.Logger).Log("err", fmt.Sprintf("CanParallel: unhandled node type %T", node)) //lint:ignore faillint allow global logger for now - return false - } - -} - -// ParallelizableFunc ensures that a promql function can be part of a parallel query. -func ParallelizableFunc(f parser.Function) bool { - - for _, v := range nonParallelFuncs { - if v == f.Name { - return false - } - } - return true -} diff --git a/internal/cortex/querier/astmapper/parallel_test.go b/internal/cortex/querier/astmapper/parallel_test.go deleted file mode 100644 index ebf144b151..0000000000 --- a/internal/cortex/querier/astmapper/parallel_test.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
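
parallel.go, removed above, is the gatekeeper for query sharding: only the aggregations in summableAggregates qualify, nested aggregations and binary expressions disqualify a subtree, and any function in nonParallelFuncs (histogram_quantile, quantile_over_time, absent) opts the whole subtree out. A minimal sketch of probing that check, again assuming the deleted astmapper package were importable; the example queries are invented.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"

	"github.com/thanos-io/thanos/internal/cortex/querier/astmapper"
)

func main() {
	for _, q := range []string{
		`sum by (job) (rate(http_requests_total[5m]))`,          // summable aggregate: parallelizable
		`histogram_quantile(0.9, rate(req_seconds_bucket[5m]))`, // non-parallel function
		`sum(foo) / sum(bar)`,                                   // binary expr merges legs: not parallelizable
	} {
		expr, err := parser.ParseExpr(q)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-55s -> %v\n", q, astmapper.CanParallelize(expr))
	}
}
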
- -package astmapper - -import ( - "fmt" - "testing" - - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" - "github.com/stretchr/testify/require" -) - -func TestCanParallel(t *testing.T) { - var testExpr = []struct { - input parser.Expr - expected bool - }{ - // simple sum - { - &parser.AggregateExpr{ - Op: parser.SUM, - Without: true, - Expr: &parser.VectorSelector{ - Name: "some_metric", - LabelMatchers: []*labels.Matcher{ - mustLabelMatcher(labels.MatchEqual, string(model.MetricNameLabel), "some_metric"), - }, - }, - Grouping: []string{"foo"}, - }, - true, - }, - /* - sum( - sum by (foo) bar1{baz=”blip”}[1m]) - / - sum by (foo) bar2{baz=”blip”}[1m])) - ) - */ - { - &parser.AggregateExpr{ - Op: parser.SUM, - Expr: &parser.BinaryExpr{ - Op: parser.DIV, - LHS: &parser.AggregateExpr{ - Op: parser.SUM, - Grouping: []string{"foo"}, - Expr: &parser.VectorSelector{ - Name: "idk", - LabelMatchers: []*labels.Matcher{ - mustLabelMatcher(labels.MatchEqual, string(model.MetricNameLabel), "bar1"), - }}, - }, - RHS: &parser.AggregateExpr{ - Op: parser.SUM, - Grouping: []string{"foo"}, - Expr: &parser.VectorSelector{ - Name: "idk", - LabelMatchers: []*labels.Matcher{ - mustLabelMatcher(labels.MatchEqual, string(model.MetricNameLabel), "bar2"), - }}, - }, - }, - }, - false, - }, - // sum by (foo) bar1{baz=”blip”}[1m]) ---- this is the first leg of the above - { - &parser.AggregateExpr{ - Op: parser.SUM, - Grouping: []string{"foo"}, - Expr: &parser.VectorSelector{ - Name: "idk", - LabelMatchers: []*labels.Matcher{ - mustLabelMatcher(labels.MatchEqual, string(model.MetricNameLabel), "bar1"), - }}, - }, - true, - }, - } - - for i, c := range testExpr { - t.Run(fmt.Sprintf("[%d]", i), func(t *testing.T) { - res := CanParallelize(c.input) - require.Equal(t, c.expected, res) - }) - } -} - -func TestCanParallel_String(t *testing.T) { - var testExpr = []struct { - input string - expected bool - }{ - { - `sum by (foo) (rate(bar1{baz="blip"}[1m]))`, - true, - }, - { - `sum by (foo) (histogram_quantile(0.9, rate(http_request_duration_seconds_bucket[10m])))`, - false, - }, - { - `sum by (foo) ( - quantile_over_time(0.9, http_request_duration_seconds_bucket[10m]) - )`, - false, - }, - { - `sum( - count( - count( - foo{bar="baz"} - ) by (a,b) - ) by (instance) - )`, - false, - }, - } - - for i, c := range testExpr { - t.Run(fmt.Sprintf("[%d]", i), func(t *testing.T) { - expr, err := parser.ParseExpr(c.input) - require.Nil(t, err) - res := CanParallelize(expr) - require.Equal(t, c.expected, res) - }) - } -} diff --git a/internal/cortex/querier/astmapper/shard_summer.go b/internal/cortex/querier/astmapper/shard_summer.go deleted file mode 100644 index 4a26df8c1c..0000000000 --- a/internal/cortex/querier/astmapper/shard_summer.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package astmapper - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" -) - -const ( - // ShardLabel is a reserved label referencing a cortex shard - ShardLabel = "__cortex_shard__" - // ShardLabelFmt is the fmt of the ShardLabel key. 
- ShardLabelFmt = "%d_of_%d" -) - -var ( - // ShardLabelRE matches a value in ShardLabelFmt - ShardLabelRE = regexp.MustCompile("^[0-9]+_of_[0-9]+$") -) - -type squasher = func(...parser.Node) (parser.Expr, error) - -type shardSummer struct { - shards int - currentShard *int - squash squasher - - // Metrics. - shardedQueries prometheus.Counter -} - -// NewShardSummer instantiates an ASTMapper which will fan out sum queries by shard -func NewShardSummer(shards int, squasher squasher, shardedQueries prometheus.Counter) (ASTMapper, error) { - if squasher == nil { - return nil, errors.Errorf("squasher required and not passed") - } - - return NewASTNodeMapper(&shardSummer{ - shards: shards, - squash: squasher, - currentShard: nil, - shardedQueries: shardedQueries, - }), nil -} - -// CopyWithCurShard clones a shardSummer with a new current shard. -func (summer *shardSummer) CopyWithCurShard(curshard int) *shardSummer { - s := *summer - s.currentShard = &curshard - return &s -} - -// shardSummer expands a query AST by sharding and re-summing when possible -func (summer *shardSummer) MapNode(node parser.Node) (parser.Node, bool, error) { - - switch n := node.(type) { - case *parser.AggregateExpr: - if CanParallelize(n) && n.Op == parser.SUM { - result, err := summer.shardSum(n) - return result, true, err - } - - return n, false, nil - - case *parser.VectorSelector: - if summer.currentShard != nil { - mapped, err := shardVectorSelector(*summer.currentShard, summer.shards, n) - return mapped, true, err - } - return n, true, nil - - case *parser.MatrixSelector: - if summer.currentShard != nil { - mapped, err := shardMatrixSelector(*summer.currentShard, summer.shards, n) - return mapped, true, err - } - return n, true, nil - - default: - return n, false, nil - } -} - -// shardSum contains the logic for how we split/stitch legs of a parallelized sum query -func (summer *shardSummer) shardSum(expr *parser.AggregateExpr) (parser.Node, error) { - - parent, subSums, err := summer.splitSum(expr) - if err != nil { - return nil, err - } - - combinedSums, err := summer.squash(subSums...) 
- - if err != nil { - return nil, err - } - - parent.Expr = combinedSums - return parent, nil -} - -// splitSum forms the parent and child legs of a parallel query -func (summer *shardSummer) splitSum( - expr *parser.AggregateExpr, -) ( - parent *parser.AggregateExpr, - children []parser.Node, - err error, -) { - parent = &parser.AggregateExpr{ - Op: expr.Op, - Param: expr.Param, - } - var mkChild func(sharded *parser.AggregateExpr) parser.Expr - - if expr.Without { - /* - parallelizing a sum using without(foo) is representable naively as - sum without(foo) ( - sum without(__cortex_shard__) (rate(bar1{__cortex_shard__="0_of_2",baz="blip"}[1m])) or - sum without(__cortex_shard__) (rate(bar1{__cortex_shard__="1_of_2",baz="blip"}[1m])) - ) - or (more optimized): - sum without(__cortex_shard__) ( - sum without(foo) (rate(bar1{__cortex_shard__="0_of_2",baz="blip"}[1m])) or - sum without(foo) (rate(bar1{__cortex_shard__="1_of_2",baz="blip"}[1m])) - ) - - */ - parent.Grouping = []string{ShardLabel} - parent.Without = true - mkChild = func(sharded *parser.AggregateExpr) parser.Expr { - sharded.Grouping = expr.Grouping - sharded.Without = true - return sharded - } - } else if len(expr.Grouping) > 0 { - /* - parallelizing a sum using by(foo) is representable as - sum by(foo) ( - sum by(foo, __cortex_shard__) (rate(bar1{__cortex_shard__="0_of_2",baz="blip"}[1m])) or - sum by(foo, __cortex_shard__) (rate(bar1{__cortex_shard__="1_of_2",baz="blip"}[1m])) - ) - */ - parent.Grouping = expr.Grouping - mkChild = func(sharded *parser.AggregateExpr) parser.Expr { - groups := make([]string, 0, len(expr.Grouping)+1) - groups = append(groups, expr.Grouping...) - groups = append(groups, ShardLabel) - sharded.Grouping = groups - return sharded - } - } else { - /* - parallelizing a non-parameterized sum is representable as - sum( - sum without(__cortex_shard__) (rate(bar1{__cortex_shard__="0_of_2",baz="blip"}[1m])) or - sum without(__cortex_shard__) (rate(bar1{__cortex_shard__="1_of_2",baz="blip"}[1m])) - ) - or (more optimized): - sum without(__cortex_shard__) ( - sum by(__cortex_shard__) (rate(bar1{__cortex_shard__="0_of_2",baz="blip"}[1m])) or - sum by(__cortex_shard__) (rate(bar1{__cortex_shard__="1_of_2",baz="blip"}[1m])) - ) - */ - parent.Grouping = []string{ShardLabel} - parent.Without = true - mkChild = func(sharded *parser.AggregateExpr) parser.Expr { - sharded.Grouping = []string{ShardLabel} - return sharded - } - } - - // iterate across shardFactor to create children - for i := 0; i < summer.shards; i++ { - cloned, err := CloneNode(expr.Expr) - if err != nil { - return parent, children, err - } - - subSummer := NewASTNodeMapper(summer.CopyWithCurShard(i)) - sharded, err := subSummer.Map(cloned) - if err != nil { - return parent, children, err - } - - subSum := mkChild(&parser.AggregateExpr{ - Op: expr.Op, - Expr: sharded.(parser.Expr), - }) - - children = append(children, - subSum, - ) - } - - summer.recordShards(float64(summer.shards)) - - return parent, children, nil -} - -// ShardSummer is explicitly passed a prometheus.Counter during construction -// in order to prevent duplicate metric registerings (ShardSummers are created per request). -// recordShards prevents calling nil interfaces (commonly used in tests). 
-func (summer *shardSummer) recordShards(n float64) { - if summer.shardedQueries != nil { - summer.shardedQueries.Add(float64(summer.shards)) - } -} - -func shardVectorSelector(curshard, shards int, selector *parser.VectorSelector) (parser.Node, error) { - shardMatcher, err := labels.NewMatcher(labels.MatchEqual, ShardLabel, fmt.Sprintf(ShardLabelFmt, curshard, shards)) - if err != nil { - return nil, err - } - - return &parser.VectorSelector{ - Name: selector.Name, - Offset: selector.Offset, - LabelMatchers: append( - []*labels.Matcher{shardMatcher}, - selector.LabelMatchers..., - ), - }, nil -} - -func shardMatrixSelector(curshard, shards int, selector *parser.MatrixSelector) (parser.Node, error) { - shardMatcher, err := labels.NewMatcher(labels.MatchEqual, ShardLabel, fmt.Sprintf(ShardLabelFmt, curshard, shards)) - if err != nil { - return nil, err - } - - if vs, ok := selector.VectorSelector.(*parser.VectorSelector); ok { - return &parser.MatrixSelector{ - VectorSelector: &parser.VectorSelector{ - Name: vs.Name, - Offset: vs.Offset, - LabelMatchers: append( - []*labels.Matcher{shardMatcher}, - vs.LabelMatchers..., - ), - PosRange: vs.PosRange, - }, - Range: selector.Range, - EndPos: selector.EndPos, - }, nil - } - - return nil, fmt.Errorf("invalid selector type: %T", selector.VectorSelector) -} - -// ParseShard will extract the shard information encoded in ShardLabelFmt -func ParseShard(input string) (parsed ShardAnnotation, err error) { - if !ShardLabelRE.MatchString(input) { - return parsed, errors.Errorf("Invalid ShardLabel value: [%s]", input) - } - - matches := strings.Split(input, "_") - x, err := strconv.Atoi(matches[0]) - if err != nil { - return parsed, err - } - of, err := strconv.Atoi(matches[2]) - if err != nil { - return parsed, err - } - - if x >= of { - return parsed, errors.Errorf("Shards out of bounds: [%d] >= [%d]", x, of) - } - return ShardAnnotation{ - Shard: x, - Of: of, - }, err -} - -// ShardAnnotation is a convenience struct which holds data from a parsed shard label -type ShardAnnotation struct { - Shard int - Of int -} - -// String encodes a shardAnnotation into a label value -func (shard ShardAnnotation) String() string { - return fmt.Sprintf(ShardLabelFmt, shard.Shard, shard.Of) -} - -// Label generates the ShardAnnotation as a label -func (shard ShardAnnotation) Label() labels.Label { - return labels.Label{ - Name: ShardLabel, - Value: shard.String(), - } -} - -// ShardFromMatchers extracts a ShardAnnotation and the index it was pulled from in the matcher list -func ShardFromMatchers(matchers []*labels.Matcher) (shard *ShardAnnotation, idx int, err error) { - for i, matcher := range matchers { - if matcher.Name == ShardLabel && matcher.Type == labels.MatchEqual { - shard, err := ParseShard(matcher.Value) - if err != nil { - return nil, i, err - } - return &shard, i, nil - } - } - return nil, 0, nil -} diff --git a/internal/cortex/querier/astmapper/subtree_folder.go b/internal/cortex/querier/astmapper/subtree_folder.go deleted file mode 100644 index 3de3c7d450..0000000000 --- a/internal/cortex/querier/astmapper/subtree_folder.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package astmapper - -import ( - "github.com/prometheus/prometheus/promql/parser" -) - -/* -subtreeFolder is a NodeMapper which embeds an entire parser.Node in an embedded query -if it does not contain any previously embedded queries. 
This allows the frontend to "zip up" entire -subtrees of an AST that have not already been parallelized. -*/ -type subtreeFolder struct{} - -// NewSubtreeFolder creates a subtreeFolder which can reduce an AST -// to one embedded query if it contains no embedded queries yet -func NewSubtreeFolder() ASTMapper { - return NewASTNodeMapper(&subtreeFolder{}) -} - -// MapNode implements NodeMapper -func (f *subtreeFolder) MapNode(node parser.Node) (parser.Node, bool, error) { - switch n := node.(type) { - // do not attempt to fold number or string leaf nodes - case *parser.NumberLiteral, *parser.StringLiteral: - return n, true, nil - } - - containsEmbedded, err := Predicate(node, predicate(isEmbedded)) - if err != nil { - return nil, true, err - } - - if containsEmbedded { - return node, false, nil - } - - expr, err := VectorSquasher(node) - return expr, true, err -} - -func isEmbedded(node parser.Node) (bool, error) { - switch n := node.(type) { - case *parser.VectorSelector: - if n.Name == EmbeddedQueriesMetricName { - return true, nil - } - - case *parser.MatrixSelector: - return isEmbedded(n.VectorSelector) - } - return false, nil -} - -type predicate = func(parser.Node) (bool, error) - -// Predicate is a helper which uses parser.Walk under the hood determine if any node in a subtree -// returns true for a specified function -func Predicate(node parser.Node, fn predicate) (bool, error) { - v := &visitor{ - fn: fn, - } - - if err := parser.Walk(v, node, nil); err != nil { - return false, err - } - return v.result, nil -} - -type visitor struct { - fn predicate - result bool -} - -// Visit implements parser.Visitor -func (v *visitor) Visit(node parser.Node, path []parser.Node) (parser.Visitor, error) { - // if the visitor has already seen a predicate success, don't overwrite - if v.result { - return nil, nil - } - - var err error - - v.result, err = v.fn(node) - if err != nil { - return nil, err - } - if v.result { - return nil, nil - } - return v, nil -} diff --git a/internal/cortex/querier/lazyquery/lazyquery.go b/internal/cortex/querier/lazyquery/lazyquery.go deleted file mode 100644 index 9c08091c70..0000000000 --- a/internal/cortex/querier/lazyquery/lazyquery.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package lazyquery - -import ( - "context" - - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" -) - -// LazyQueryable wraps a storage.Queryable -type LazyQueryable struct { - q storage.Queryable -} - -// Querier implements storage.Queryable -func (lq LazyQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - q, err := lq.q.Querier(ctx, mint, maxt) - if err != nil { - return nil, err - } - - return NewLazyQuerier(q), nil -} - -// NewLazyQueryable returns a lazily wrapped queryable -func NewLazyQueryable(q storage.Queryable) storage.Queryable { - return LazyQueryable{q} -} - -// LazyQuerier is a lazy-loaded adapter for a storage.Querier -type LazyQuerier struct { - next storage.Querier -} - -// NewLazyQuerier wraps a storage.Querier, does the Select in the background. -// Return value cannot be used from more than one goroutine simultaneously. 
-func NewLazyQuerier(next storage.Querier) storage.Querier { - return LazyQuerier{next} -} - -// Select implements Storage.Querier -func (l LazyQuerier) Select(selectSorted bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - // make sure there is space in the buffer, to unblock the goroutine and let it die even if nobody is - // waiting for the result yet (or anymore). - future := make(chan storage.SeriesSet, 1) - go func() { - future <- l.next.Select(selectSorted, params, matchers...) - }() - - return &lazySeriesSet{ - future: future, - } -} - -// LabelValues implements Storage.Querier -func (l LazyQuerier) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return l.next.LabelValues(name, matchers...) -} - -// LabelNames implements Storage.Querier -func (l LazyQuerier) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return l.next.LabelNames(matchers...) -} - -// Close implements Storage.Querier -func (l LazyQuerier) Close() error { - return l.next.Close() -} - -type lazySeriesSet struct { - next storage.SeriesSet - future chan storage.SeriesSet -} - -// Next implements storage.SeriesSet. NB not thread safe! -func (s *lazySeriesSet) Next() bool { - if s.next == nil { - s.next = <-s.future - } - return s.next.Next() -} - -// At implements storage.SeriesSet. -func (s *lazySeriesSet) At() storage.Series { - if s.next == nil { - s.next = <-s.future - } - return s.next.At() -} - -// Err implements storage.SeriesSet. -func (s *lazySeriesSet) Err() error { - if s.next == nil { - s.next = <-s.future - } - return s.next.Err() -} - -// Warnings implements storage.SeriesSet. -func (s *lazySeriesSet) Warnings() storage.Warnings { - return nil -} diff --git a/internal/cortex/querier/querier.go b/internal/cortex/querier/querier.go index 1365d93ae8..9d3fad079e 100644 --- a/internal/cortex/querier/querier.go +++ b/internal/cortex/querier/querier.go @@ -41,10 +41,6 @@ type Config struct { // series is considered stale. LookbackDelta time.Duration `yaml:"lookback_delta"` - // Blocks storage only. - StoreGatewayAddresses string `yaml:"store_gateway_addresses"` - StoreGatewayClient ClientConfig `yaml:"store_gateway_client"` - SecondStoreEngine string `yaml:"second_store_engine"` UseSecondStoreBeforeTime flagext.Time `yaml:"use_second_store_before_time"` diff --git a/internal/cortex/querier/queryrange/promql_test.go b/internal/cortex/querier/queryrange/promql_test.go deleted file mode 100644 index 9605798886..0000000000 --- a/internal/cortex/querier/queryrange/promql_test.go +++ /dev/null @@ -1,691 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
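
The shard summer machinery removed above encodes shard membership in the reserved __cortex_shard__ label using the %d_of_%d format; shardVectorSelector injects the matcher on the way down, and ShardFromMatchers/ParseShard recover and validate it in the queryable. That is the convention the promql_test.go cases below exercise. A small hedged sketch of the round trip, with illustrative values:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"

	"github.com/thanos-io/thanos/internal/cortex/querier/astmapper"
)

func main() {
	// Encode: shard 0 of 3 becomes the label __cortex_shard__="0_of_3".
	shard := astmapper.ShardAnnotation{Shard: 0, Of: 3}
	fmt.Println(shard.Label().Name, "=", shard.String())

	// Decode: the matcher injected by shardVectorSelector is recognised,
	// validated and returned (with its index) by ShardFromMatchers.
	matchers := []*labels.Matcher{
		labels.MustNewMatcher(labels.MatchEqual, astmapper.ShardLabel, "2_of_3"),
		labels.MustNewMatcher(labels.MatchEqual, "__name__", "bar1"),
	}
	parsed, idx, err := astmapper.ShardFromMatchers(matchers)
	if err != nil {
		panic(err)
	}
	fmt.Printf("shard %d of %d, found at matcher index %d\n", parsed.Shard, parsed.Of, idx)
}
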
- -package queryrange - -import ( - "context" - "fmt" - "math" - "sort" - "strings" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/stretchr/testify/require" - - "github.com/thanos-io/thanos/internal/cortex/querier/astmapper" -) - -var ( - start = time.Unix(1000, 0) - end = start.Add(3 * time.Minute) - step = 30 * time.Second - ctx = context.Background() - engine = promql.NewEngine(promql.EngineOpts{ - Reg: prometheus.DefaultRegisterer, - Logger: log.NewNopLogger(), - Timeout: 1 * time.Hour, - MaxSamples: 10e6, - ActiveQueryTracker: nil, - }) -) - -// This test allows to verify which PromQL expressions can be parallelized. -func Test_PromQL(t *testing.T) { - t.Parallel() - - var tests = []struct { - normalQuery string - shardQuery string - shouldEqual bool - }{ - // Vector can be parallelized but we need to remove the cortex shard label. - // It should be noted that the __cortex_shard__ label is required by the engine - // and therefore should be returned by the storage. - // Range vectors `bar1{baz="blip"}[1m]` are not tested here because it is not supported - // by range queries. - { - `bar1{baz="blip"}`, - `label_replace( - bar1{__cortex_shard__="0_of_3",baz="blip"} or - bar1{__cortex_shard__="1_of_3",baz="blip"} or - bar1{__cortex_shard__="2_of_3",baz="blip"}, - "__cortex_shard__","","","" - )`, - true, - }, - // __cortex_shard__ label is required otherwise the or will keep only the first series. - { - `sum(bar1{baz="blip"})`, - `sum( - sum (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - sum (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - sum (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - false, - }, - { - `sum(bar1{baz="blip"})`, - `sum( - sum without(__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - sum without(__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - sum without(__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - { - `sum by (foo) (bar1{baz="blip"})`, - `sum by (foo) ( - sum by(foo,__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - sum by(foo,__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - sum by(foo,__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - { - `sum by (foo,bar) (bar1{baz="blip"})`, - `sum by (foo,bar)( - sum by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - sum by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - sum by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - // since series are unique to a shard, it's safe to sum without shard first, then reaggregate - { - `sum without (foo,bar) (bar1{baz="blip"})`, - `sum without (foo,bar)( - sum without(__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - sum without(__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - sum without(__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - { - `min by (foo,bar) (bar1{baz="blip"})`, - `min by (foo,bar)( - min by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - min by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - min by(foo,bar,__cortex_shard__) 
(bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - { - `max by (foo,bar) (bar1{baz="blip"})`, - ` max by (foo,bar)( - max by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - max by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - max by(foo,bar,__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - // avg generally cant be parallelized - { - `avg(bar1{baz="blip"})`, - `avg( - avg by(__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - avg by(__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - avg by(__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - false, - }, - // stddev can't be parallelized. - { - `stddev(bar1{baz="blip"})`, - ` stddev( - stddev by(__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - stddev by(__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - stddev by(__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - false, - }, - // stdvar can't be parallelized. - { - `stdvar(bar1{baz="blip"})`, - `stdvar( - stdvar by(__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - stdvar by(__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - stdvar by(__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - false, - }, - { - `count(bar1{baz="blip"})`, - `count( - count without (__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - count without (__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - count without (__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - { - `count by (foo,bar) (bar1{baz="blip"})`, - `count by (foo,bar) ( - count by (foo,bar,__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - count by (foo,bar,__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - count by (foo,bar,__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - // different ways to represent count without. 
- { - `count without (foo) (bar1{baz="blip"})`, - `count without (foo) ( - count without (__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - count without (__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - count without (__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - { - `count without (foo) (bar1{baz="blip"})`, - `sum without (__cortex_shard__) ( - count without (foo) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - count without (foo) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - count without (foo) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - { - `count without (foo, bar) (bar1{baz="blip"})`, - `count without (foo, bar) ( - count without (__cortex_shard__) (bar1{__cortex_shard__="0_of_3",baz="blip"}) or - count without (__cortex_shard__) (bar1{__cortex_shard__="1_of_3",baz="blip"}) or - count without (__cortex_shard__) (bar1{__cortex_shard__="2_of_3",baz="blip"}) - )`, - true, - }, - { - `topk(2,bar1{baz="blip"})`, - `label_replace( - topk(2, - topk(2,(bar1{__cortex_shard__="0_of_3",baz="blip"})) without(__cortex_shard__) or - topk(2,(bar1{__cortex_shard__="1_of_3",baz="blip"})) without(__cortex_shard__) or - topk(2,(bar1{__cortex_shard__="2_of_3",baz="blip"})) without(__cortex_shard__) - ), - "__cortex_shard__","","","")`, - true, - }, - { - `bottomk(2,bar1{baz="blip"})`, - `label_replace( - bottomk(2, - bottomk(2,(bar1{__cortex_shard__="0_of_3",baz="blip"})) without(__cortex_shard__) or - bottomk(2,(bar1{__cortex_shard__="1_of_3",baz="blip"})) without(__cortex_shard__) or - bottomk(2,(bar1{__cortex_shard__="2_of_3",baz="blip"})) without(__cortex_shard__) - ), - "__cortex_shard__","","","")`, - true, - }, - { - `sum by (foo,bar) (avg_over_time(bar1{baz="blip"}[1m]))`, - `sum by (foo,bar)( - sum by(foo,bar,__cortex_shard__) (avg_over_time(bar1{__cortex_shard__="0_of_3",baz="blip"}[1m])) or - sum by(foo,bar,__cortex_shard__) (avg_over_time(bar1{__cortex_shard__="1_of_3",baz="blip"}[1m])) or - sum by(foo,bar,__cortex_shard__) (avg_over_time(bar1{__cortex_shard__="2_of_3",baz="blip"}[1m])) - )`, - true, - }, - { - `sum by (foo,bar) (min_over_time(bar1{baz="blip"}[1m]))`, - `sum by (foo,bar)( - sum by(foo,bar,__cortex_shard__) (min_over_time(bar1{__cortex_shard__="0_of_3",baz="blip"}[1m])) or - sum by(foo,bar,__cortex_shard__) (min_over_time(bar1{__cortex_shard__="1_of_3",baz="blip"}[1m])) or - sum by(foo,bar,__cortex_shard__) (min_over_time(bar1{__cortex_shard__="2_of_3",baz="blip"}[1m])) - )`, - true, - }, - { - // Sub aggregations must avoid non-associative series merging across shards - `sum( - count( - bar1 - ) by (foo,bazz) - )`, - ` - sum without(__cortex_shard__) ( - sum by(__cortex_shard__) ( - count by(foo, bazz) (foo{__cortex_shard__="0_of_2",bar="baz"}) - ) or - sum by(__cortex_shard__) ( - count by(foo, bazz) (foo{__cortex_shard__="1_of_2",bar="baz"}) - ) - ) -`, - false, - }, - { - // Note: this is a speculative optimization that we don't currently include due to mapping complexity. - // Certain sub aggregations may inject __cortex_shard__ for all (by) subgroupings. 
- // This is the same as the previous test with the exception that the shard label is injected to the count grouping - `sum( - count( - bar1 - ) by (foo,bazz) - )`, - ` - sum without(__cortex_shard__) ( - sum by(__cortex_shard__) ( - count by(foo, bazz, __cortex_shard__) (foo{__cortex_shard__="0_of_2",bar="baz"}) - ) or - sum by(__cortex_shard__) ( - count by(foo, bazz, __cortex_shard__) (foo{__cortex_shard__="1_of_2",bar="baz"}) - ) - ) -`, - true, - }, - { - // Note: this is a speculative optimization that we don't currently include due to mapping complexity - // This example details multiple layers of aggregations. - // Sub aggregations must inject __cortex_shard__ for all (by) subgroupings. - `sum( - count( - count( - bar1 - ) by (foo,bazz) - ) by (bazz) - )`, - ` - sum without(__cortex_shard__) ( - sum by(__cortex_shard__) ( - count by(bazz, __cortex_shard__) ( - count by(foo, bazz, __cortex_shard__) ( - foo{__cortex_shard__="0_of_2", bar="baz"} - ) - ) - ) or - sum by(__cortex_shard__) ( - count by(bazz, __cortex_shard__) ( - count by(foo, bazz, __cortex_shard__) ( - foo{__cortex_shard__="1_of_2", bar="baz"} - ) - ) - ) - ) -`, - true, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.normalQuery, func(t *testing.T) { - - baseQuery, err := engine.NewRangeQuery(shardAwareQueryable, nil, tt.normalQuery, start, end, step) - require.Nil(t, err) - shardQuery, err := engine.NewRangeQuery(shardAwareQueryable, nil, tt.shardQuery, start, end, step) - require.Nil(t, err) - baseResult := baseQuery.Exec(ctx) - shardResult := shardQuery.Exec(ctx) - t.Logf("base: %v\n", baseResult) - t.Logf("shard: %v\n", shardResult) - if tt.shouldEqual { - require.Equal(t, baseResult, shardResult) - return - } - require.NotEqual(t, baseResult, shardResult) - }) - } - -} - -func Test_FunctionParallelism(t *testing.T) { - tpl := `sum((bar1{}))` - shardTpl := `sum( - sum without(__cortex_shard__) ((bar1{__cortex_shard__="0_of_3"})) or - sum without(__cortex_shard__) ((bar1{__cortex_shard__="1_of_3"})) or - sum without(__cortex_shard__) ((bar1{__cortex_shard__="2_of_3"})) - )` - - mkQuery := func(tpl, fn string, testMatrix bool, fArgs []string) (result string) { - result = strings.Replace(tpl, "", fn, -1) - - if testMatrix { - // turn selectors into ranges - result = strings.Replace(result, "}", "}[1m]", -1) - } - - if len(fArgs) > 0 { - args := "," + strings.Join(fArgs, ",") - result = strings.Replace(result, "", args, -1) - } else { - result = strings.Replace(result, "", "", -1) - } - - return result - } - - for _, tc := range []struct { - fn string - fArgs []string - isTestMatrix bool - approximate bool - }{ - { - fn: "abs", - }, - { - fn: "avg_over_time", - isTestMatrix: true, - approximate: true, - }, - { - fn: "ceil", - }, - { - fn: "changes", - isTestMatrix: true, - }, - { - fn: "count_over_time", - isTestMatrix: true, - }, - { - fn: "days_in_month", - }, - { - fn: "day_of_month", - }, - { - fn: "day_of_week", - }, - { - fn: "delta", - isTestMatrix: true, - approximate: true, - }, - { - fn: "deriv", - isTestMatrix: true, - approximate: true, - }, - { - fn: "exp", - approximate: true, - }, - { - fn: "floor", - }, - { - fn: "hour", - }, - { - fn: "idelta", - isTestMatrix: true, - approximate: true, - }, - { - fn: "increase", - isTestMatrix: true, - approximate: true, - }, - { - fn: "irate", - isTestMatrix: true, - approximate: true, - }, - { - fn: "ln", - approximate: true, - }, - { - fn: "log10", - approximate: true, - }, - { - fn: "log2", - approximate: true, - }, - { - fn: "max_over_time", - 
isTestMatrix: true, - }, - { - fn: "min_over_time", - isTestMatrix: true, - }, - { - fn: "minute", - }, - { - fn: "month", - }, - { - fn: "rate", - isTestMatrix: true, - approximate: true, - }, - { - fn: "resets", - isTestMatrix: true, - }, - { - fn: "sort", - }, - { - fn: "sort_desc", - }, - { - fn: "sqrt", - approximate: true, - }, - { - fn: "stddev_over_time", - isTestMatrix: true, - approximate: true, - }, - { - fn: "stdvar_over_time", - isTestMatrix: true, - approximate: true, - }, - { - fn: "sum_over_time", - isTestMatrix: true, - }, - { - fn: "timestamp", - }, - { - fn: "year", - }, - { - fn: "clamp_max", - fArgs: []string{"5"}, - }, - { - fn: "clamp_min", - fArgs: []string{"5"}, - }, - { - fn: "predict_linear", - isTestMatrix: true, - approximate: true, - fArgs: []string{"1"}, - }, - { - fn: "round", - fArgs: []string{"20"}, - }, - { - fn: "holt_winters", - isTestMatrix: true, - fArgs: []string{"0.5", "0.7"}, - approximate: true, - }, - } { - - t.Run(tc.fn, func(t *testing.T) { - baseQuery, err := engine.NewRangeQuery( - shardAwareQueryable, - nil, - mkQuery(tpl, tc.fn, tc.isTestMatrix, tc.fArgs), - start, - end, - step, - ) - require.Nil(t, err) - shardQuery, err := engine.NewRangeQuery( - shardAwareQueryable, - nil, - mkQuery(shardTpl, tc.fn, tc.isTestMatrix, tc.fArgs), - start, - end, - step, - ) - require.Nil(t, err) - baseResult := baseQuery.Exec(ctx) - shardResult := shardQuery.Exec(ctx) - t.Logf("base: %+v\n", baseResult) - t.Logf("shard: %+v\n", shardResult) - if !tc.approximate { - require.Equal(t, baseResult, shardResult) - } else { - // Some functions yield tiny differences when sharded due to combining floating point calculations. - baseSeries := baseResult.Value.(promql.Matrix)[0] - shardSeries := shardResult.Value.(promql.Matrix)[0] - - require.Equal(t, len(baseSeries.Points), len(shardSeries.Points)) - for i, basePt := range baseSeries.Points { - shardPt := shardSeries.Points[i] - require.Equal(t, basePt.T, shardPt.T) - require.Equal( - t, - math.Round(basePt.V*1e6)/1e6, - math.Round(shardPt.V*1e6)/1e6, - ) - } - - } - }) - } - -} - -var shardAwareQueryable = storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return &testMatrix{ - series: []*promql.StorageSeries{ - newSeries(labels.Labels{{Name: "__name__", Value: "bar1"}, {Name: "baz", Value: "blip"}, {Name: "bar", Value: "blop"}, {Name: "foo", Value: "barr"}}, factor(5)), - newSeries(labels.Labels{{Name: "__name__", Value: "bar1"}, {Name: "baz", Value: "blip"}, {Name: "bar", Value: "blop"}, {Name: "foo", Value: "bazz"}}, factor(7)), - newSeries(labels.Labels{{Name: "__name__", Value: "bar1"}, {Name: "baz", Value: "blip"}, {Name: "bar", Value: "blap"}, {Name: "foo", Value: "buzz"}}, factor(12)), - newSeries(labels.Labels{{Name: "__name__", Value: "bar1"}, {Name: "baz", Value: "blip"}, {Name: "bar", Value: "blap"}, {Name: "foo", Value: "bozz"}}, factor(11)), - newSeries(labels.Labels{{Name: "__name__", Value: "bar1"}, {Name: "baz", Value: "blip"}, {Name: "bar", Value: "blop"}, {Name: "foo", Value: "buzz"}}, factor(8)), - newSeries(labels.Labels{{Name: "__name__", Value: "bar1"}, {Name: "baz", Value: "blip"}, {Name: "bar", Value: "blap"}, {Name: "foo", Value: "bazz"}}, identity), - }, - }, nil -}) - -type testMatrix struct { - series []*promql.StorageSeries -} - -func (m *testMatrix) Copy() *testMatrix { - cpy := *m - return &cpy -} - -func (m testMatrix) Next() bool { return len(m.series) != 0 } - -func (m *testMatrix) At() storage.Series { - res := m.series[0] - m.series = 
m.series[1:] - return res -} - -func (m *testMatrix) Err() error { return nil } - -func (m *testMatrix) Warnings() storage.Warnings { return nil } - -func (m *testMatrix) Select(_ bool, selectParams *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - s, _, err := astmapper.ShardFromMatchers(matchers) - if err != nil { - return storage.ErrSeriesSet(err) - } - - if s != nil { - return splitByShard(s.Shard, s.Of, m) - } - - return m.Copy() -} - -func (m *testMatrix) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, nil -} -func (m *testMatrix) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, nil -} -func (m *testMatrix) Close() error { return nil } - -func newSeries(metric labels.Labels, generator func(float64) float64) *promql.StorageSeries { - sort.Sort(metric) - var points []promql.Point - - for ts := start.Add(-step); ts.Unix() <= end.Unix(); ts = ts.Add(step) { - t := ts.Unix() * 1e3 - points = append(points, promql.Point{ - T: t, - V: generator(float64(t)), - }) - } - - return promql.NewStorageSeries(promql.Series{ - Metric: metric, - Points: points, - }) -} - -func identity(t float64) float64 { - return float64(t) -} - -func factor(f float64) func(float64) float64 { - i := 0. - return func(float64) float64 { - i++ - res := i * f - return res - } -} - -// var identity(t int64) float64 { -// return float64(t) -// } - -// splitByShard returns the shard subset of a testMatrix. -// e.g if a testMatrix has 6 series, and we want 3 shard, then each shard will contain -// 2 series. -func splitByShard(shardIndex, shardTotal int, testMatrices *testMatrix) *testMatrix { - res := &testMatrix{} - for i, s := range testMatrices.series { - if i%shardTotal != shardIndex { - continue - } - var points []promql.Point - it := s.Iterator() - for it.Next() != chunkenc.ValNone { - t, v := it.At() - points = append(points, promql.Point{ - T: t, - V: v, - }) - - } - lbs := s.Labels().Copy() - lbs = append(lbs, labels.Label{Name: "__cortex_shard__", Value: fmt.Sprintf("%d_of_%d", shardIndex, shardTotal)}) - sort.Sort(lbs) - res.series = append(res.series, promql.NewStorageSeries(promql.Series{ - Metric: lbs, - Points: points, - })) - } - return res -} diff --git a/internal/cortex/querier/queryrange/series_test.go b/internal/cortex/querier/queryrange/series_test.go deleted file mode 100644 index d41bc43a92..0000000000 --- a/internal/cortex/querier/queryrange/series_test.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
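
In the test fixture deleted above, splitByShard assigns series to shards by index modulo the shard count and stamps each returned series with the matching __cortex_shard__ label, so that "or"-ing the per-shard legs reproduces the full series set. A standalone sketch of just that assignment rule, with invented series names:

package main

import "fmt"

func main() {
	series := []string{"s0", "s1", "s2", "s3", "s4", "s5"} // hypothetical series identifiers
	shards := 3

	// Same rule as splitByShard: series i belongs to shard i % shards,
	// and each copy carries the __cortex_shard__="<i>_of_<n>" label.
	for shard := 0; shard < shards; shard++ {
		var owned []string
		for i, s := range series {
			if i%shards == shard {
				owned = append(owned, fmt.Sprintf("%s{__cortex_shard__=%q}", s, fmt.Sprintf("%d_of_%d", shard, shards)))
			}
		}
		fmt.Printf("shard %d: %v\n", shard, owned)
	}
}
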
- -package queryrange - -import ( - "testing" - - "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/stretchr/testify/require" - - "github.com/thanos-io/thanos/internal/cortex/cortexpb" -) - -func Test_ResponseToSamples(t *testing.T) { - input := &PrometheusResponse{ - Data: PrometheusData{ - ResultType: string(parser.ValueTypeMatrix), - Result: []SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "a", Value: "a1"}, - {Name: "b", Value: "b1"}, - }, - Samples: []cortexpb.Sample{ - { - Value: 1, - TimestampMs: 1, - }, - { - Value: 2, - TimestampMs: 2, - }, - }, - }, - { - Labels: []cortexpb.LabelAdapter{ - {Name: "a", Value: "a1"}, - {Name: "b", Value: "b1"}, - }, - Samples: []cortexpb.Sample{ - { - Value: 8, - TimestampMs: 1, - }, - { - Value: 9, - TimestampMs: 2, - }, - }, - }, - }, - }, - } - - streams, err := ResponseToSamples(input) - require.Nil(t, err) - set := NewSeriesSet(streams) - - setCt := 0 - - for set.Next() { - iter := set.At().Iterator() - require.Nil(t, set.Err()) - - sampleCt := 0 - for iter.Next() != chunkenc.ValNone { - ts, v := iter.At() - require.Equal(t, input.Data.Result[setCt].Samples[sampleCt].TimestampMs, ts) - require.Equal(t, input.Data.Result[setCt].Samples[sampleCt].Value, v) - sampleCt++ - } - require.Equal(t, len(input.Data.Result[setCt].Samples), sampleCt) - setCt++ - } - - require.Equal(t, len(input.Data.Result), setCt) - -} diff --git a/internal/cortex/querier/queryrange/test_utils.go b/internal/cortex/querier/queryrange/test_utils.go deleted file mode 100644 index 2294eaa319..0000000000 --- a/internal/cortex/querier/queryrange/test_utils.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package queryrange - -import ( - "context" - "fmt" - "time" - - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/storage" - - "github.com/thanos-io/thanos/internal/cortex/querier/astmapper" - "github.com/thanos-io/thanos/internal/cortex/querier/series" -) - -// genLabels will create a slice of labels where each label has an equal chance to occupy a value from [0,labelBuckets]. It returns a slice of length labelBuckets^len(labelSet) -func genLabels( - labelSet []string, - labelBuckets int, -) (result []labels.Labels) { - if len(labelSet) == 0 { - return result - } - - l := labelSet[0] - rest := genLabels(labelSet[1:], labelBuckets) - - for i := 0; i < labelBuckets; i++ { - x := labels.Label{ - Name: l, - Value: fmt.Sprintf("%d", i), - } - if len(rest) == 0 { - set := labels.Labels{x} - result = append(result, set) - continue - } - for _, others := range rest { - set := append(others, x) - result = append(result, set) - } - } - return result - -} - -// NewMockShardedQueryable creates a shard-aware in memory queryable. 
-func NewMockShardedQueryable( - nSamples int, - labelSet []string, - labelBuckets int, - delayPerSeries time.Duration, -) *MockShardedQueryable { - samples := make([]model.SamplePair, 0, nSamples) - for i := 0; i < nSamples; i++ { - samples = append(samples, model.SamplePair{ - Timestamp: model.Time(i * 1000), - Value: model.SampleValue(i), - }) - } - sets := genLabels(labelSet, labelBuckets) - xs := make([]storage.Series, 0, len(sets)) - for _, ls := range sets { - xs = append(xs, series.NewConcreteSeries(ls, samples)) - } - - return &MockShardedQueryable{ - series: xs, - delayPerSeries: delayPerSeries, - } -} - -// MockShardedQueryable is exported to be reused in the querysharding benchmarking -type MockShardedQueryable struct { - series []storage.Series - delayPerSeries time.Duration -} - -// Querier impls storage.Queryable -func (q *MockShardedQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return q, nil -} - -// Select implements storage.Querier interface. -// The bool passed is ignored because the series is always sorted. -func (q *MockShardedQueryable) Select(_ bool, _ *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - tStart := time.Now() - - shard, _, err := astmapper.ShardFromMatchers(matchers) - if err != nil { - return storage.ErrSeriesSet(err) - } - - var ( - start int - end int - ) - - if shard == nil { - start = 0 - end = len(q.series) - } else { - // return the series range associated with this shard - seriesPerShard := len(q.series) / shard.Of - start = shard.Shard * seriesPerShard - end = start + seriesPerShard - - // if we're clipping an odd # of series, add the final series to the last shard - if end == len(q.series)-1 && len(q.series)%2 == 1 { - end = len(q.series) - } - } - - var name string - for _, m := range matchers { - if m.Type == labels.MatchEqual && m.Name == "__name__" { - name = m.Value - } - } - - results := make([]storage.Series, 0, end-start) - for i := start; i < end; i++ { - results = append(results, &ShardLabelSeries{ - shard: shard, - name: name, - Series: q.series[i], - }) - } - - // loosely enforce the assumption that an operation on 1/nth of the data - // takes 1/nth of the time. - duration := q.delayPerSeries * time.Duration(len(q.series)) - if shard != nil { - duration = duration / time.Duration(shard.Of) - } - - remaining := time.Until(tStart.Add(duration)) - if remaining > 0 { - time.Sleep(remaining) - } - - // sorted - return series.NewConcreteSeriesSet(results) -} - -// ShardLabelSeries allows extending a Series with new labels. This is helpful for adding cortex shard labels -type ShardLabelSeries struct { - shard *astmapper.ShardAnnotation - name string - storage.Series -} - -// Labels impls storage.Series -func (s *ShardLabelSeries) Labels() labels.Labels { - ls := s.Series.Labels() - - if s.name != "" { - ls = append(ls, labels.Label{ - Name: "__name__", - Value: s.name, - }) - } - - if s.shard != nil { - ls = append(ls, s.shard.Label()) - } - - return ls -} - -// LabelValues impls storage.Querier -func (q *MockShardedQueryable) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, errors.Errorf("unimplemented") -} - -// LabelNames returns all the unique label names present in the block in sorted order. -func (q *MockShardedQueryable) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, errors.Errorf("unimplemented") -} - -// Close releases the resources of the Querier. 
-func (q *MockShardedQueryable) Close() error { - return nil -} diff --git a/internal/cortex/querier/queryrange/test_utils_test.go b/internal/cortex/querier/queryrange/test_utils_test.go deleted file mode 100644 index 559bd701d8..0000000000 --- a/internal/cortex/querier/queryrange/test_utils_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package queryrange - -import ( - "math" - "sort" - "testing" - - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/stretchr/testify/require" - - "github.com/thanos-io/thanos/internal/cortex/querier/astmapper" -) - -func TestGenLabelsCorrectness(t *testing.T) { - ls := genLabels([]string{"a", "b"}, 2) - for _, set := range ls { - sort.Sort(set) - } - expected := []labels.Labels{ - { - labels.Label{ - Name: "a", - Value: "0", - }, - labels.Label{ - Name: "b", - Value: "0", - }, - }, - { - labels.Label{ - Name: "a", - Value: "0", - }, - labels.Label{ - Name: "b", - Value: "1", - }, - }, - { - labels.Label{ - Name: "a", - Value: "1", - }, - labels.Label{ - Name: "b", - Value: "0", - }, - }, - { - labels.Label{ - Name: "a", - Value: "1", - }, - labels.Label{ - Name: "b", - Value: "1", - }, - }, - } - require.Equal(t, expected, ls) -} - -func TestGenLabelsSize(t *testing.T) { - for _, tc := range []struct { - set []string - buckets int - }{ - { - set: []string{"a", "b"}, - buckets: 5, - }, - { - set: []string{"a", "b", "c"}, - buckets: 10, - }, - } { - sets := genLabels(tc.set, tc.buckets) - require.Equal( - t, - math.Pow(float64(tc.buckets), float64(len(tc.set))), - float64(len(sets)), - ) - } -} - -func TestNewMockShardedqueryable(t *testing.T) { - for _, tc := range []struct { - shards, nSamples, labelBuckets int - labelSet []string - }{ - { - nSamples: 100, - shards: 1, - labelBuckets: 3, - labelSet: []string{"a", "b", "c"}, - }, - { - nSamples: 0, - shards: 2, - labelBuckets: 3, - labelSet: []string{"a", "b", "c"}, - }, - } { - q := NewMockShardedQueryable(tc.nSamples, tc.labelSet, tc.labelBuckets, 0) - expectedSeries := int(math.Pow(float64(tc.labelBuckets), float64(len(tc.labelSet)))) - - seriesCt := 0 - for i := 0; i < tc.shards; i++ { - - set := q.Select(false, nil, &labels.Matcher{ - Type: labels.MatchEqual, - Name: astmapper.ShardLabel, - Value: astmapper.ShardAnnotation{ - Shard: i, - Of: tc.shards, - }.String(), - }) - - require.Nil(t, set.Err()) - - for set.Next() { - seriesCt++ - iter := set.At().Iterator() - samples := 0 - for iter.Next() != chunkenc.ValNone { - samples++ - } - require.Equal(t, tc.nSamples, samples) - } - - } - require.Equal(t, expectedSeries, seriesCt) - } -} diff --git a/internal/cortex/querier/queryrange/value.go b/internal/cortex/querier/queryrange/value.go deleted file mode 100644 index be3c7566ac..0000000000 --- a/internal/cortex/querier/queryrange/value.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. 
- -package queryrange - -import ( - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql" - "github.com/prometheus/prometheus/promql/parser" - "github.com/prometheus/prometheus/storage" - - "github.com/thanos-io/thanos/internal/cortex/cortexpb" - "github.com/thanos-io/thanos/internal/cortex/querier/series" -) - -// FromResult transforms a promql query result into a samplestream -func FromResult(res *promql.Result) ([]SampleStream, error) { - if res.Err != nil { - // The error could be wrapped by the PromQL engine. We get the error's cause in order to - // correctly parse the error in parent callers (eg. gRPC response status code extraction). - return nil, errors.Cause(res.Err) - } - switch v := res.Value.(type) { - case promql.Scalar: - return []SampleStream{ - { - Samples: []cortexpb.Sample{ - { - Value: v.V, - TimestampMs: v.T, - }, - }, - }, - }, nil - - case promql.Vector: - res := make([]SampleStream, 0, len(v)) - for _, sample := range v { - res = append(res, SampleStream{ - Labels: mapLabels(sample.Metric), - Samples: mapPoints(sample.Point), - }) - } - return res, nil - - case promql.Matrix: - res := make([]SampleStream, 0, len(v)) - for _, series := range v { - res = append(res, SampleStream{ - Labels: mapLabels(series.Metric), - Samples: mapPoints(series.Points...), - }) - } - return res, nil - - } - - return nil, errors.Errorf("Unexpected value type: [%s]", res.Value.Type()) -} - -func mapLabels(ls labels.Labels) []cortexpb.LabelAdapter { - result := make([]cortexpb.LabelAdapter, 0, len(ls)) - for _, l := range ls { - result = append(result, cortexpb.LabelAdapter(l)) - } - - return result -} - -func mapPoints(pts ...promql.Point) []cortexpb.Sample { - result := make([]cortexpb.Sample, 0, len(pts)) - - for _, pt := range pts { - result = append(result, cortexpb.Sample{ - Value: pt.V, - TimestampMs: pt.T, - }) - } - - return result -} - -// ResponseToSamples is needed to map back from api response to the underlying series data -func ResponseToSamples(resp Response) ([]SampleStream, error) { - promRes, ok := resp.(*PrometheusResponse) - if !ok { - return nil, errors.Errorf("error invalid response type: %T, expected: %T", resp, &PrometheusResponse{}) - } - if promRes.Error != "" { - return nil, errors.New(promRes.Error) - } - switch promRes.Data.ResultType { - case string(parser.ValueTypeVector), string(parser.ValueTypeMatrix): - return promRes.Data.Result, nil - } - - return nil, errors.Errorf( - "Invalid promql.Value type: [%s]. Only %s and %s supported", - promRes.Data.ResultType, - parser.ValueTypeVector, - parser.ValueTypeMatrix, - ) -} - -// NewSeriesSet returns an in memory storage.SeriesSet from a []SampleStream -// As NewSeriesSet uses NewConcreteSeriesSet to implement SeriesSet, result will be sorted by label names. 
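// Editor sketch (hypothetical caller, not part of the deleted file): paired
// with ResponseToSamples above, NewSeriesSet turned a range-query response
// back into a storage.SeriesSet that iterates like any other series set, e.g.
//
//	streams, err := ResponseToSamples(resp) // resp is a *PrometheusResponse
//	if err != nil {
//		return err
//	}
//	set := NewSeriesSet(streams)
//	for set.Next() {
//		it := set.At().Iterator()
//		for it.Next() != chunkenc.ValNone {
//			ts, v := it.At()
//			_, _ = ts, v
//		}
//	}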
-func NewSeriesSet(results []SampleStream) storage.SeriesSet { - set := make([]storage.Series, 0, len(results)) - - for _, stream := range results { - samples := make([]model.SamplePair, 0, len(stream.Samples)) - for _, sample := range stream.Samples { - samples = append(samples, model.SamplePair{ - Timestamp: model.Time(sample.TimestampMs), - Value: model.SampleValue(sample.Value), - }) - } - - ls := make([]labels.Label, 0, len(stream.Labels)) - for _, l := range stream.Labels { - ls = append(ls, labels.Label(l)) - } - set = append(set, series.NewConcreteSeries(ls, samples)) - } - return series.NewConcreteSeriesSet(set) -} diff --git a/internal/cortex/querier/queryrange/value_test.go b/internal/cortex/querier/queryrange/value_test.go deleted file mode 100644 index 92367120e0..0000000000 --- a/internal/cortex/querier/queryrange/value_test.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package queryrange - -import ( - "fmt" - "testing" - - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql" - "github.com/stretchr/testify/require" - - "github.com/thanos-io/thanos/internal/cortex/cortexpb" -) - -func TestFromValue(t *testing.T) { - var testExpr = []struct { - input *promql.Result - err bool - expected []SampleStream - }{ - // string (errors) - { - input: &promql.Result{Value: promql.String{T: 1, V: "hi"}}, - err: true, - }, - { - input: &promql.Result{Err: errors.New("foo")}, - err: true, - }, - // Scalar - { - input: &promql.Result{Value: promql.Scalar{T: 1, V: 1}}, - err: false, - expected: []SampleStream{ - { - Samples: []cortexpb.Sample{ - { - Value: 1, - TimestampMs: 1, - }, - }, - }, - }, - }, - // Vector - { - input: &promql.Result{ - Value: promql.Vector{ - promql.Sample{ - Point: promql.Point{T: 1, V: 1}, - Metric: labels.Labels{ - {Name: "a", Value: "a1"}, - {Name: "b", Value: "b1"}, - }, - }, - promql.Sample{ - Point: promql.Point{T: 2, V: 2}, - Metric: labels.Labels{ - {Name: "a", Value: "a2"}, - {Name: "b", Value: "b2"}, - }, - }, - }, - }, - err: false, - expected: []SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "a", Value: "a1"}, - {Name: "b", Value: "b1"}, - }, - Samples: []cortexpb.Sample{ - { - Value: 1, - TimestampMs: 1, - }, - }, - }, - { - Labels: []cortexpb.LabelAdapter{ - {Name: "a", Value: "a2"}, - {Name: "b", Value: "b2"}, - }, - Samples: []cortexpb.Sample{ - { - Value: 2, - TimestampMs: 2, - }, - }, - }, - }, - }, - // Matrix - { - input: &promql.Result{ - Value: promql.Matrix{ - { - Metric: labels.Labels{ - {Name: "a", Value: "a1"}, - {Name: "b", Value: "b1"}, - }, - Points: []promql.Point{ - {T: 1, V: 1}, - {T: 2, V: 2}, - }, - }, - { - Metric: labels.Labels{ - {Name: "a", Value: "a2"}, - {Name: "b", Value: "b2"}, - }, - Points: []promql.Point{ - {T: 1, V: 8}, - {T: 2, V: 9}, - }, - }, - }, - }, - err: false, - expected: []SampleStream{ - { - Labels: []cortexpb.LabelAdapter{ - {Name: "a", Value: "a1"}, - {Name: "b", Value: "b1"}, - }, - Samples: []cortexpb.Sample{ - { - Value: 1, - TimestampMs: 1, - }, - { - Value: 2, - TimestampMs: 2, - }, - }, - }, - { - Labels: []cortexpb.LabelAdapter{ - {Name: "a", Value: "a2"}, - {Name: "b", Value: "b2"}, - }, - Samples: []cortexpb.Sample{ - { - Value: 8, - TimestampMs: 1, - }, - { - Value: 9, - TimestampMs: 2, - }, - }, - }, - }, - }, - } - - for i, c := range testExpr { - t.Run(fmt.Sprintf("[%d]", i), func(t *testing.T) { - result, err := FromResult(c.input) - if c.err { - 
require.NotNil(t, err) - } else { - require.Nil(t, err) - require.Equal(t, c.expected, result) - } - }) - } -} diff --git a/internal/cortex/querier/series/series_set.go b/internal/cortex/querier/series/series_set.go index 9e4d28c591..d86d3d1f1e 100644 --- a/internal/cortex/querier/series/series_set.go +++ b/internal/cortex/querier/series/series_set.go @@ -27,8 +27,6 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/thanos-io/thanos/internal/cortex/prom1/storage/metric" ) // ConcreteSeriesSet implements storage.SeriesSet. @@ -203,18 +201,6 @@ func MatrixToSeriesSet(m model.Matrix) storage.SeriesSet { return NewConcreteSeriesSet(series) } -// MetricsToSeriesSet creates a storage.SeriesSet from a []metric.Metric -func MetricsToSeriesSet(ms []metric.Metric) storage.SeriesSet { - series := make([]storage.Series, 0, len(ms)) - for _, m := range ms { - series = append(series, &ConcreteSeries{ - labels: metricToLabels(m.Metric), - samples: nil, - }) - } - return NewConcreteSeriesSet(series) -} - func metricToLabels(m model.Metric) labels.Labels { ls := make(labels.Labels, 0, len(m)) for k, v := range m { diff --git a/internal/cortex/querier/store_gateway_client.go b/internal/cortex/querier/store_gateway_client.go deleted file mode 100644 index ab6e25dce4..0000000000 --- a/internal/cortex/querier/store_gateway_client.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package querier - -import ( - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" - - "github.com/thanos-io/thanos/internal/cortex/storegateway/storegatewaypb" - "github.com/thanos-io/thanos/internal/cortex/util/tls" -) - -type storeGatewayClient struct { - storegatewaypb.StoreGatewayClient - grpc_health_v1.HealthClient - conn *grpc.ClientConn -} - -type ClientConfig struct { - TLSEnabled bool `yaml:"tls_enabled"` - TLS tls.ClientConfig `yaml:",inline"` -} diff --git a/internal/cortex/storegateway/storegatewaypb/gateway.pb.go b/internal/cortex/storegateway/storegatewaypb/gateway.pb.go deleted file mode 100644 index fa5913faf4..0000000000 --- a/internal/cortex/storegateway/storegatewaypb/gateway.pb.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: gateway.proto - -package storegatewaypb - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - storepb "github.com/thanos-io/thanos/pkg/store/storepb" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } - -var fileDescriptor_f1a937782ebbded5 = []byte{ - // 257 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x4f, 0x2c, 0x49, - 0x2d, 0x4f, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x72, 0x0b, 0x92, 0xa4, - 0xcc, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x4b, 0x32, 0x12, 0xf3, - 0xf2, 0x8b, 0x75, 0x33, 0xf3, 0xa1, 0x2c, 0xfd, 0x82, 0xec, 0x74, 0xfd, 0xe2, 0x92, 0xfc, 0xa2, - 0x54, 0x08, 0x59, 0x90, 0xa4, 0x5f, 0x54, 0x90, 0x0c, 0x31, 0xc3, 0xe8, 0x1a, 0x23, 0x17, 0x4f, - 0x30, 0x48, 0xd4, 0x1d, 0x62, 0x96, 0x90, 0x25, 0x17, 0x5b, 0x70, 0x6a, 0x51, 0x66, 0x6a, 0xb1, - 0x90, 0xa8, 0x1e, 0x44, 0xbf, 0x1e, 0x84, 0x1f, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x25, - 0x86, 0x2e, 0x5c, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x6a, 0xc0, 0x28, 0xe4, 0xcc, 0xc5, 0xe5, 0x93, - 0x98, 0x94, 0x9a, 0xe3, 0x97, 0x98, 0x9b, 0x5a, 0x2c, 0x24, 0x09, 0x53, 0x87, 0x10, 0x83, 0x19, - 0x21, 0x85, 0x4d, 0x0a, 0x62, 0x8c, 0x90, 0x1b, 0x17, 0x37, 0x58, 0x34, 0x2c, 0x31, 0xa7, 0x34, - 0xb5, 0x58, 0x08, 0x55, 0x29, 0x44, 0x10, 0x66, 0x8c, 0x34, 0x56, 0x39, 0x88, 0x39, 0x4e, 0x2e, - 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, - 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, - 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, - 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0xf8, 0xc0, 0x21, 0x04, 0x0f, 0xd7, 0x24, 0x36, - 0x70, 0x28, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xec, 0xe6, 0x0a, 0x7a, 0x01, 0x00, - 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// StoreGatewayClient is the client API for StoreGateway service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type StoreGatewayClient interface { - // Series streams each Series for given label matchers and time range. - // - // Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain - // partition of the single series, but once a new series is started to be streamed it means that no more data will - // be sent for previous one. - // - // Series are sorted. - Series(ctx context.Context, in *storepb.SeriesRequest, opts ...grpc.CallOption) (StoreGateway_SeriesClient, error) - // LabelNames returns all label names that is available. - LabelNames(ctx context.Context, in *storepb.LabelNamesRequest, opts ...grpc.CallOption) (*storepb.LabelNamesResponse, error) - // LabelValues returns all label values for given label name. 
- LabelValues(ctx context.Context, in *storepb.LabelValuesRequest, opts ...grpc.CallOption) (*storepb.LabelValuesResponse, error) -} - -type storeGatewayClient struct { - cc *grpc.ClientConn -} - -func NewStoreGatewayClient(cc *grpc.ClientConn) StoreGatewayClient { - return &storeGatewayClient{cc} -} - -func (c *storeGatewayClient) Series(ctx context.Context, in *storepb.SeriesRequest, opts ...grpc.CallOption) (StoreGateway_SeriesClient, error) { - stream, err := c.cc.NewStream(ctx, &_StoreGateway_serviceDesc.Streams[0], "/gatewaypb.StoreGateway/Series", opts...) - if err != nil { - return nil, err - } - x := &storeGatewaySeriesClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type StoreGateway_SeriesClient interface { - Recv() (*storepb.SeriesResponse, error) - grpc.ClientStream -} - -type storeGatewaySeriesClient struct { - grpc.ClientStream -} - -func (x *storeGatewaySeriesClient) Recv() (*storepb.SeriesResponse, error) { - m := new(storepb.SeriesResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storeGatewayClient) LabelNames(ctx context.Context, in *storepb.LabelNamesRequest, opts ...grpc.CallOption) (*storepb.LabelNamesResponse, error) { - out := new(storepb.LabelNamesResponse) - err := c.cc.Invoke(ctx, "/gatewaypb.StoreGateway/LabelNames", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *storeGatewayClient) LabelValues(ctx context.Context, in *storepb.LabelValuesRequest, opts ...grpc.CallOption) (*storepb.LabelValuesResponse, error) { - out := new(storepb.LabelValuesResponse) - err := c.cc.Invoke(ctx, "/gatewaypb.StoreGateway/LabelValues", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// StoreGatewayServer is the server API for StoreGateway service. -type StoreGatewayServer interface { - // Series streams each Series for given label matchers and time range. - // - // Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain - // partition of the single series, but once a new series is started to be streamed it means that no more data will - // be sent for previous one. - // - // Series are sorted. - Series(*storepb.SeriesRequest, StoreGateway_SeriesServer) error - // LabelNames returns all label names that is available. - LabelNames(context.Context, *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) - // LabelValues returns all label values for given label name. - LabelValues(context.Context, *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) -} - -// UnimplementedStoreGatewayServer can be embedded to have forward compatible implementations. 
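// Editor sketch (hypothetical type, not part of the generated file): a server
// that only supports LabelNames could embed the stub below so the remaining
// RPCs return codes.Unimplemented instead of failing to compile as the
// service grows:
//
//	type labelNamesOnlyGateway struct {
//		UnimplementedStoreGatewayServer
//	}
//
//	func (g *labelNamesOnlyGateway) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) {
//		return &storepb.LabelNamesResponse{}, nil
//	}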
-type UnimplementedStoreGatewayServer struct { -} - -func (*UnimplementedStoreGatewayServer) Series(req *storepb.SeriesRequest, srv StoreGateway_SeriesServer) error { - return status.Errorf(codes.Unimplemented, "method Series not implemented") -} -func (*UnimplementedStoreGatewayServer) LabelNames(ctx context.Context, req *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelNames not implemented") -} -func (*UnimplementedStoreGatewayServer) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LabelValues not implemented") -} - -func RegisterStoreGatewayServer(s *grpc.Server, srv StoreGatewayServer) { - s.RegisterService(&_StoreGateway_serviceDesc, srv) -} - -func _StoreGateway_Series_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(storepb.SeriesRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(StoreGatewayServer).Series(m, &storeGatewaySeriesServer{stream}) -} - -type StoreGateway_SeriesServer interface { - Send(*storepb.SeriesResponse) error - grpc.ServerStream -} - -type storeGatewaySeriesServer struct { - grpc.ServerStream -} - -func (x *storeGatewaySeriesServer) Send(m *storepb.SeriesResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _StoreGateway_LabelNames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(storepb.LabelNamesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StoreGatewayServer).LabelNames(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/gatewaypb.StoreGateway/LabelNames", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StoreGatewayServer).LabelNames(ctx, req.(*storepb.LabelNamesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _StoreGateway_LabelValues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(storepb.LabelValuesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(StoreGatewayServer).LabelValues(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/gatewaypb.StoreGateway/LabelValues", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StoreGatewayServer).LabelValues(ctx, req.(*storepb.LabelValuesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _StoreGateway_serviceDesc = grpc.ServiceDesc{ - ServiceName: "gatewaypb.StoreGateway", - HandlerType: (*StoreGatewayServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LabelNames", - Handler: _StoreGateway_LabelNames_Handler, - }, - { - MethodName: "LabelValues", - Handler: _StoreGateway_LabelValues_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Series", - Handler: _StoreGateway_Series_Handler, - ServerStreams: true, - }, - }, - Metadata: "gateway.proto", -} diff --git a/internal/cortex/storegateway/storegatewaypb/gateway.proto b/internal/cortex/storegateway/storegatewaypb/gateway.proto deleted file mode 100644 index 3e4783407b..0000000000 --- a/internal/cortex/storegateway/storegatewaypb/gateway.proto +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) The 
Cortex Authors. -// Licensed under the Apache License 2.0. - -syntax = "proto3"; -package gatewaypb; - -import "github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto"; - -option go_package = "storegatewaypb"; - -service StoreGateway { - // Series streams each Series for given label matchers and time range. - // - // Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain - // partition of the single series, but once a new series is started to be streamed it means that no more data will - // be sent for previous one. - // - // Series are sorted. - rpc Series(thanos.SeriesRequest) returns (stream thanos.SeriesResponse); - - // LabelNames returns all label names that is available. - rpc LabelNames(thanos.LabelNamesRequest) returns (thanos.LabelNamesResponse); - - // LabelValues returns all label values for given label name. - rpc LabelValues(thanos.LabelValuesRequest) returns (thanos.LabelValuesResponse); -} diff --git a/internal/cortex/tenant/resolver.go b/internal/cortex/tenant/resolver.go index a83cfb16e6..2a15a70bf8 100644 --- a/internal/cortex/tenant/resolver.go +++ b/internal/cortex/tenant/resolver.go @@ -6,7 +6,6 @@ package tenant import ( "context" "errors" - "net/http" "strings" "github.com/weaveworks/common/user" @@ -14,11 +13,6 @@ import ( var defaultResolver Resolver = NewSingleResolver() -// WithDefaultResolver updates the resolver used for the package methods. -func WithDefaultResolver(r Resolver) { - defaultResolver = r -} - // TenantID returns exactly a single tenant ID from the context. It should be // used when a certain endpoint should only support exactly a single // tenant ID. It returns an error user.ErrNoOrgID if there is no tenant ID @@ -100,64 +94,3 @@ func (t *SingleResolver) TenantIDs(ctx context.Context) ([]string, error) { } return []string{orgID}, err } - -type MultiResolver struct { -} - -// NewMultiResolver creates a tenant resolver, which allows request to have -// multiple tenant ids submitted separated by a '|' character. This enforces -// further limits on the character set allowed within tenants as detailed here: -// https://cortexmetrics.io/docs/guides/limitations/#tenant-id-naming) -func NewMultiResolver() *MultiResolver { - return &MultiResolver{} -} - -func (t *MultiResolver) TenantID(ctx context.Context) (string, error) { - orgIDs, err := t.TenantIDs(ctx) - if err != nil { - return "", err - } - - if len(orgIDs) > 1 { - return "", user.ErrTooManyOrgIDs - } - - return orgIDs[0], nil -} - -func (t *MultiResolver) TenantIDs(ctx context.Context) ([]string, error) { - //lint:ignore faillint wrapper around upstream method - orgID, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, err - } - - orgIDs := strings.Split(orgID, tenantIDsLabelSeparator) - for _, orgID := range orgIDs { - if err := ValidTenantID(orgID); err != nil { - return nil, err - } - if containsUnsafePathSegments(orgID) { - return nil, errInvalidTenantID - } - } - - return NormalizeTenantIDs(orgIDs), nil -} - -// ExtractTenantIDFromHTTPRequest extracts a single TenantID through a given -// resolver directly from a HTTP request. 
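// Editor sketch (hypothetical middleware call site, not part of the deleted
// file): the helper below could be used at the HTTP boundary, for example
//
//	tenantID, ctx, err := ExtractTenantIDFromHTTPRequest(r)
//	if err != nil {
//		http.Error(w, err.Error(), http.StatusUnauthorized)
//		return
//	}
//	r = r.WithContext(ctx)
//	_ = tenantID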
-func ExtractTenantIDFromHTTPRequest(req *http.Request) (string, context.Context, error) { - //lint:ignore faillint wrapper around upstream method - _, ctx, err := user.ExtractOrgIDFromHTTPRequest(req) - if err != nil { - return "", nil, err - } - - tenantID, err := defaultResolver.TenantID(ctx) - if err != nil { - return "", nil, err - } - - return tenantID, ctx, nil -} diff --git a/internal/cortex/tenant/resolver_test.go b/internal/cortex/tenant/resolver_test.go index 220cb97d69..33168c68ea 100644 --- a/internal/cortex/tenant/resolver_test.go +++ b/internal/cortex/tenant/resolver_test.go @@ -106,47 +106,3 @@ func TestSingleResolver(t *testing.T) { t.Run(tc.name, tc.test(r)) } } - -func TestMultiResolver(t *testing.T) { - r := NewMultiResolver() - for _, tc := range append(commonResolverTestCases, []resolverTestCase{ - { - name: "multi-tenant", - headerValue: strptr("tenant-a|tenant-b"), - errTenantID: user.ErrTooManyOrgIDs, - tenantIDs: []string{"tenant-a", "tenant-b"}, - }, - { - name: "multi-tenant-wrong-order", - headerValue: strptr("tenant-b|tenant-a"), - errTenantID: user.ErrTooManyOrgIDs, - tenantIDs: []string{"tenant-a", "tenant-b"}, - }, - { - name: "multi-tenant-duplicate-order", - headerValue: strptr("tenant-b|tenant-b|tenant-a"), - errTenantID: user.ErrTooManyOrgIDs, - tenantIDs: []string{"tenant-a", "tenant-b"}, - }, - { - name: "multi-tenant-with-relative-path", - headerValue: strptr("tenant-a|tenant-b|.."), - errTenantID: errInvalidTenantID, - errTenantIDs: errInvalidTenantID, - }, - { - name: "containing-forward-slash", - headerValue: strptr("forward/slash"), - errTenantID: &errTenantIDUnsupportedCharacter{pos: 7, tenantID: "forward/slash"}, - errTenantIDs: &errTenantIDUnsupportedCharacter{pos: 7, tenantID: "forward/slash"}, - }, - { - name: "containing-backward-slash", - headerValue: strptr(`backward\slash`), - errTenantID: &errTenantIDUnsupportedCharacter{pos: 8, tenantID: "backward\\slash"}, - errTenantIDs: &errTenantIDUnsupportedCharacter{pos: 8, tenantID: "backward\\slash"}, - }, - }...) 
{ - t.Run(tc.name, tc.test(r)) - } -} diff --git a/internal/cortex/tenant/tenant.go b/internal/cortex/tenant/tenant.go index 8c9ffca22c..599cc231f0 100644 --- a/internal/cortex/tenant/tenant.go +++ b/internal/cortex/tenant/tenant.go @@ -4,106 +4,11 @@ package tenant import ( - "context" - "errors" - "fmt" - "sort" "strings" - - "github.com/weaveworks/common/user" -) - -var ( - errTenantIDTooLong = errors.New("tenant ID is too long: max 150 characters") ) -type errTenantIDUnsupportedCharacter struct { - pos int - tenantID string -} - -func (e *errTenantIDUnsupportedCharacter) Error() string { - return fmt.Sprintf( - "tenant ID '%s' contains unsupported character '%c'", - e.tenantID, - e.tenantID[e.pos], - ) -} - const tenantIDsLabelSeparator = "|" -// NormalizeTenantIDs is creating a normalized form by sortiing and de-duplicating the list of tenantIDs -func NormalizeTenantIDs(tenantIDs []string) []string { - sort.Strings(tenantIDs) - - count := len(tenantIDs) - if count <= 1 { - return tenantIDs - } - - posOut := 1 - for posIn := 1; posIn < count; posIn++ { - if tenantIDs[posIn] != tenantIDs[posIn-1] { - tenantIDs[posOut] = tenantIDs[posIn] - posOut++ - } - } - - return tenantIDs[0:posOut] -} - -// ValidTenantID -func ValidTenantID(s string) error { - // check if it contains invalid runes - for pos, r := range s { - if !isSupported(r) { - return &errTenantIDUnsupportedCharacter{ - tenantID: s, - pos: pos, - } - } - } - - if len(s) > 150 { - return errTenantIDTooLong - } - - return nil -} - func JoinTenantIDs(tenantIDs []string) string { return strings.Join(tenantIDs, tenantIDsLabelSeparator) } - -// this checks if a rune is supported in tenant IDs (according to -// https://cortexmetrics.io/docs/guides/limitations/#tenant-id-naming) -func isSupported(c rune) bool { - // characters - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') { - return true - } - - // digits - if '0' <= c && c <= '9' { - return true - } - - // special - return c == '!' || - c == '-' || - c == '_' || - c == '.' || - c == '*' || - c == '\'' || - c == '(' || - c == ')' -} - -// TenantIDsFromOrgID extracts different tenants from an orgID string value -// -// ignore stutter warning -// -//nolint:golint -func TenantIDsFromOrgID(orgID string) ([]string, error) { - return TenantIDs(user.InjectOrgID(context.TODO(), orgID)) -} diff --git a/internal/cortex/tenant/tenant_test.go b/internal/cortex/tenant/tenant_test.go deleted file mode 100644 index 3c743c284a..0000000000 --- a/internal/cortex/tenant/tenant_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) The Cortex Authors. -// Licensed under the Apache License 2.0. - -package tenant - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestValidTenantIDs(t *testing.T) { - for _, tc := range []struct { - name string - err *string - }{ - { - name: "tenant-a", - }, - { - name: "ABCDEFGHIJKLMNOPQRSTUVWXYZ-abcdefghijklmnopqrstuvwxyz_0987654321!.*'()", - }, - { - name: "invalid|", - err: strptr("tenant ID 'invalid|' contains unsupported character '|'"), - }, - { - name: strings.Repeat("a", 150), - }, - { - name: strings.Repeat("a", 151), - err: strptr("tenant ID is too long: max 150 characters"), - }, - } { - t.Run(tc.name, func(t *testing.T) { - err := ValidTenantID(tc.name) - if tc.err == nil { - assert.Nil(t, err) - } else { - assert.EqualError(t, err, *tc.err) - } - }) - } -}
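For readers tracking what the removed multi-tenant helpers did, here is a minimal, self-contained sketch of the behaviour visible in the hunks above: tenant IDs arrive joined by '|', are split, then sorted and de-duplicated the way the deleted NormalizeTenantIDs did. It is an editor illustration under those assumptions, not a drop-in replacement; the lowercase names are local to the sketch.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// normalizeTenantIDs mirrors the removed NormalizeTenantIDs: sort the IDs and
// drop adjacent duplicates.
func normalizeTenantIDs(ids []string) []string {
	sort.Strings(ids)
	out := make([]string, 0, len(ids))
	for i, id := range ids {
		if i == 0 || id != ids[i-1] {
			out = append(out, id)
		}
	}
	return out
}

func main() {
	// The removed MultiResolver accepted a header value such as
	// "tenant-b|tenant-b|tenant-a" and resolved it to [tenant-a tenant-b].
	ids := strings.Split("tenant-b|tenant-b|tenant-a", "|")
	fmt.Println(normalizeTenantIDs(ids)) // [tenant-a tenant-b]
}

The deleted helpers additionally rejected IDs longer than 150 characters and characters outside the Cortex allow-list (letters, digits, and !-_.*'() per ValidTenantID above) before normalizing.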