diff --git a/.mockery.yaml b/.mockery.yaml index 4d7097c9a1..e03e72fc0f 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -11,4 +11,6 @@ packages: github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1: interfaces: MetastoreServiceClient: - + github.com/grafana/pyroscope/api/gen/proto/go/querier/v1/querierv1connect: + interfaces: + QuerierServiceClient: diff --git a/api/gen/proto/go/query/v1/query.pb.go b/api/gen/proto/go/query/v1/query.pb.go new file mode 100644 index 0000000000..a63136b443 --- /dev/null +++ b/api/gen/proto/go/query/v1/query.pb.go @@ -0,0 +1,1930 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: query/v1/query.proto + +package queryv1 + +import ( + _ "github.com/grafana/pyroscope/api/gen/proto/go/google/v1" + v1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + v11 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type QueryType int32 + +const ( + QueryType_QUERY_UNSPECIFIED QueryType = 0 + QueryType_QUERY_LABEL_NAMES QueryType = 1 + QueryType_QUERY_LABEL_VALUES QueryType = 2 + QueryType_QUERY_SERIES_LABELS QueryType = 3 + QueryType_QUERY_TIME_SERIES QueryType = 4 + QueryType_QUERY_TREE QueryType = 5 + QueryType_QUERY_PPROF QueryType = 6 +) + +// Enum value maps for QueryType. +var ( + QueryType_name = map[int32]string{ + 0: "QUERY_UNSPECIFIED", + 1: "QUERY_LABEL_NAMES", + 2: "QUERY_LABEL_VALUES", + 3: "QUERY_SERIES_LABELS", + 4: "QUERY_TIME_SERIES", + 5: "QUERY_TREE", + 6: "QUERY_PPROF", + } + QueryType_value = map[string]int32{ + "QUERY_UNSPECIFIED": 0, + "QUERY_LABEL_NAMES": 1, + "QUERY_LABEL_VALUES": 2, + "QUERY_SERIES_LABELS": 3, + "QUERY_TIME_SERIES": 4, + "QUERY_TREE": 5, + "QUERY_PPROF": 6, + } +) + +func (x QueryType) Enum() *QueryType { + p := new(QueryType) + *p = x + return p +} + +func (x QueryType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (QueryType) Descriptor() protoreflect.EnumDescriptor { + return file_query_v1_query_proto_enumTypes[0].Descriptor() +} + +func (QueryType) Type() protoreflect.EnumType { + return &file_query_v1_query_proto_enumTypes[0] +} + +func (x QueryType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use QueryType.Descriptor instead. +func (QueryType) EnumDescriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{0} +} + +type ReportType int32 + +const ( + ReportType_REPORT_UNSPECIFIED ReportType = 0 + ReportType_REPORT_LABEL_NAMES ReportType = 1 + ReportType_REPORT_LABEL_VALUES ReportType = 2 + ReportType_REPORT_SERIES_LABELS ReportType = 3 + ReportType_REPORT_TIME_SERIES ReportType = 4 + ReportType_REPORT_TREE ReportType = 5 + ReportType_REPORT_PPROF ReportType = 6 +) + +// Enum value maps for ReportType. 
+var ( + ReportType_name = map[int32]string{ + 0: "REPORT_UNSPECIFIED", + 1: "REPORT_LABEL_NAMES", + 2: "REPORT_LABEL_VALUES", + 3: "REPORT_SERIES_LABELS", + 4: "REPORT_TIME_SERIES", + 5: "REPORT_TREE", + 6: "REPORT_PPROF", + } + ReportType_value = map[string]int32{ + "REPORT_UNSPECIFIED": 0, + "REPORT_LABEL_NAMES": 1, + "REPORT_LABEL_VALUES": 2, + "REPORT_SERIES_LABELS": 3, + "REPORT_TIME_SERIES": 4, + "REPORT_TREE": 5, + "REPORT_PPROF": 6, + } +) + +func (x ReportType) Enum() *ReportType { + p := new(ReportType) + *p = x + return p +} + +func (x ReportType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReportType) Descriptor() protoreflect.EnumDescriptor { + return file_query_v1_query_proto_enumTypes[1].Descriptor() +} + +func (ReportType) Type() protoreflect.EnumType { + return &file_query_v1_query_proto_enumTypes[1] +} + +func (x ReportType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReportType.Descriptor instead. +func (ReportType) EnumDescriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{1} +} + +type QueryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartTime int64 `protobuf:"varint,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime int64 `protobuf:"varint,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + LabelSelector string `protobuf:"bytes,3,opt,name=label_selector,json=labelSelector,proto3" json:"label_selector,omitempty"` + Query []*Query `protobuf:"bytes,4,rep,name=query,proto3" json:"query,omitempty"` +} + +func (x *QueryRequest) Reset() { + *x = QueryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryRequest) ProtoMessage() {} + +func (x *QueryRequest) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead. 
+func (*QueryRequest) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{0} +} + +func (x *QueryRequest) GetStartTime() int64 { + if x != nil { + return x.StartTime + } + return 0 +} + +func (x *QueryRequest) GetEndTime() int64 { + if x != nil { + return x.EndTime + } + return 0 +} + +func (x *QueryRequest) GetLabelSelector() string { + if x != nil { + return x.LabelSelector + } + return "" +} + +func (x *QueryRequest) GetQuery() []*Query { + if x != nil { + return x.Query + } + return nil +} + +type QueryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reports []*Report `protobuf:"bytes,1,rep,name=reports,proto3" json:"reports,omitempty"` +} + +func (x *QueryResponse) Reset() { + *x = QueryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryResponse) ProtoMessage() {} + +func (x *QueryResponse) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryResponse.ProtoReflect.Descriptor instead. +func (*QueryResponse) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{1} +} + +func (x *QueryResponse) GetReports() []*Report { + if x != nil { + return x.Reports + } + return nil +} + +type InvokeOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *InvokeOptions) Reset() { + *x = InvokeOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvokeOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeOptions) ProtoMessage() {} + +func (x *InvokeOptions) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokeOptions.ProtoReflect.Descriptor instead. 
+func (*InvokeOptions) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{2} +} + +type InvokeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tenant []string `protobuf:"bytes,1,rep,name=tenant,proto3" json:"tenant,omitempty"` + StartTime int64 `protobuf:"varint,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime int64 `protobuf:"varint,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + LabelSelector string `protobuf:"bytes,4,opt,name=label_selector,json=labelSelector,proto3" json:"label_selector,omitempty"` + Query []*Query `protobuf:"bytes,5,rep,name=query,proto3" json:"query,omitempty"` + QueryPlan *QueryPlan `protobuf:"bytes,6,opt,name=query_plan,json=queryPlan,proto3" json:"query_plan,omitempty"` + Options *InvokeOptions `protobuf:"bytes,7,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *InvokeRequest) Reset() { + *x = InvokeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvokeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeRequest) ProtoMessage() {} + +func (x *InvokeRequest) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokeRequest.ProtoReflect.Descriptor instead. +func (*InvokeRequest) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{3} +} + +func (x *InvokeRequest) GetTenant() []string { + if x != nil { + return x.Tenant + } + return nil +} + +func (x *InvokeRequest) GetStartTime() int64 { + if x != nil { + return x.StartTime + } + return 0 +} + +func (x *InvokeRequest) GetEndTime() int64 { + if x != nil { + return x.EndTime + } + return 0 +} + +func (x *InvokeRequest) GetLabelSelector() string { + if x != nil { + return x.LabelSelector + } + return "" +} + +func (x *InvokeRequest) GetQuery() []*Query { + if x != nil { + return x.Query + } + return nil +} + +func (x *InvokeRequest) GetQueryPlan() *QueryPlan { + if x != nil { + return x.QueryPlan + } + return nil +} + +func (x *InvokeRequest) GetOptions() *InvokeOptions { + if x != nil { + return x.Options + } + return nil +} + +// Query plan is represented by a DAG, where each node +// might be either "merge" or "read" (leaves). Each node +// references a range: merge nodes refer to other nodes, +// while read nodes refer to the blocks. +type QueryPlan struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Each node is encoded with 3 values: + // - node type: 0 - read, 1 - merge; + // - range offset; + // - range length. + Graph []uint32 `protobuf:"varint,1,rep,packed,name=graph,proto3" json:"graph,omitempty"` + // The blocks matching the query. 
+ Blocks []*v1.BlockMeta `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` +} + +func (x *QueryPlan) Reset() { + *x = QueryPlan{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryPlan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryPlan) ProtoMessage() {} + +func (x *QueryPlan) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryPlan.ProtoReflect.Descriptor instead. +func (*QueryPlan) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{4} +} + +func (x *QueryPlan) GetGraph() []uint32 { + if x != nil { + return x.Graph + } + return nil +} + +func (x *QueryPlan) GetBlocks() []*v1.BlockMeta { + if x != nil { + return x.Blocks + } + return nil +} + +type Query struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + QueryType QueryType `protobuf:"varint,1,opt,name=query_type,json=queryType,proto3,enum=query.v1.QueryType" json:"query_type,omitempty"` + // Exactly one of the following fields should be set, + // depending on the query type. + LabelNames *LabelNamesQuery `protobuf:"bytes,2,opt,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` + LabelValues *LabelValuesQuery `protobuf:"bytes,3,opt,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + SeriesLabels *SeriesLabelsQuery `protobuf:"bytes,4,opt,name=series_labels,json=seriesLabels,proto3" json:"series_labels,omitempty"` + TimeSeries *TimeSeriesQuery `protobuf:"bytes,5,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + Tree *TreeQuery `protobuf:"bytes,6,opt,name=tree,proto3" json:"tree,omitempty"` + Pprof *PprofQuery `protobuf:"bytes,7,opt,name=pprof,proto3" json:"pprof,omitempty"` +} + +func (x *Query) Reset() { + *x = Query{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Query) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Query) ProtoMessage() {} + +func (x *Query) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Query.ProtoReflect.Descriptor instead. 
+func (*Query) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{5} +} + +func (x *Query) GetQueryType() QueryType { + if x != nil { + return x.QueryType + } + return QueryType_QUERY_UNSPECIFIED +} + +func (x *Query) GetLabelNames() *LabelNamesQuery { + if x != nil { + return x.LabelNames + } + return nil +} + +func (x *Query) GetLabelValues() *LabelValuesQuery { + if x != nil { + return x.LabelValues + } + return nil +} + +func (x *Query) GetSeriesLabels() *SeriesLabelsQuery { + if x != nil { + return x.SeriesLabels + } + return nil +} + +func (x *Query) GetTimeSeries() *TimeSeriesQuery { + if x != nil { + return x.TimeSeries + } + return nil +} + +func (x *Query) GetTree() *TreeQuery { + if x != nil { + return x.Tree + } + return nil +} + +func (x *Query) GetPprof() *PprofQuery { + if x != nil { + return x.Pprof + } + return nil +} + +type InvokeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reports []*Report `protobuf:"bytes,1,rep,name=reports,proto3" json:"reports,omitempty"` + Diagnostics *Diagnostics `protobuf:"bytes,2,opt,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *InvokeResponse) Reset() { + *x = InvokeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvokeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokeResponse) ProtoMessage() {} + +func (x *InvokeResponse) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokeResponse.ProtoReflect.Descriptor instead. +func (*InvokeResponse) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{6} +} + +func (x *InvokeResponse) GetReports() []*Report { + if x != nil { + return x.Reports + } + return nil +} + +func (x *InvokeResponse) GetDiagnostics() *Diagnostics { + if x != nil { + return x.Diagnostics + } + return nil +} + +// Diagnostic messages, events, statistics, analytics, etc. +type Diagnostics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Diagnostics) Reset() { + *x = Diagnostics{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Diagnostics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Diagnostics) ProtoMessage() {} + +func (x *Diagnostics) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Diagnostics.ProtoReflect.Descriptor instead. 
+func (*Diagnostics) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{7} +} + +type Report struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReportType ReportType `protobuf:"varint,1,opt,name=report_type,json=reportType,proto3,enum=query.v1.ReportType" json:"report_type,omitempty"` + // Exactly one of the following fields should be set, + // depending on the report type. + LabelNames *LabelNamesReport `protobuf:"bytes,2,opt,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` + LabelValues *LabelValuesReport `protobuf:"bytes,3,opt,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + SeriesLabels *SeriesLabelsReport `protobuf:"bytes,4,opt,name=series_labels,json=seriesLabels,proto3" json:"series_labels,omitempty"` + TimeSeries *TimeSeriesReport `protobuf:"bytes,5,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` + Tree *TreeReport `protobuf:"bytes,6,opt,name=tree,proto3" json:"tree,omitempty"` + Pprof *PprofReport `protobuf:"bytes,7,opt,name=pprof,proto3" json:"pprof,omitempty"` +} + +func (x *Report) Reset() { + *x = Report{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Report) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Report) ProtoMessage() {} + +func (x *Report) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Report.ProtoReflect.Descriptor instead. 
+func (*Report) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{8} +} + +func (x *Report) GetReportType() ReportType { + if x != nil { + return x.ReportType + } + return ReportType_REPORT_UNSPECIFIED +} + +func (x *Report) GetLabelNames() *LabelNamesReport { + if x != nil { + return x.LabelNames + } + return nil +} + +func (x *Report) GetLabelValues() *LabelValuesReport { + if x != nil { + return x.LabelValues + } + return nil +} + +func (x *Report) GetSeriesLabels() *SeriesLabelsReport { + if x != nil { + return x.SeriesLabels + } + return nil +} + +func (x *Report) GetTimeSeries() *TimeSeriesReport { + if x != nil { + return x.TimeSeries + } + return nil +} + +func (x *Report) GetTree() *TreeReport { + if x != nil { + return x.Tree + } + return nil +} + +func (x *Report) GetPprof() *PprofReport { + if x != nil { + return x.Pprof + } + return nil +} + +type LabelNamesQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *LabelNamesQuery) Reset() { + *x = LabelNamesQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelNamesQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelNamesQuery) ProtoMessage() {} + +func (x *LabelNamesQuery) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelNamesQuery.ProtoReflect.Descriptor instead. +func (*LabelNamesQuery) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{9} +} + +type LabelNamesReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *LabelNamesQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + LabelNames []string `protobuf:"bytes,2,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` +} + +func (x *LabelNamesReport) Reset() { + *x = LabelNamesReport{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelNamesReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelNamesReport) ProtoMessage() {} + +func (x *LabelNamesReport) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelNamesReport.ProtoReflect.Descriptor instead. 
+func (*LabelNamesReport) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{10} +} + +func (x *LabelNamesReport) GetQuery() *LabelNamesQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *LabelNamesReport) GetLabelNames() []string { + if x != nil { + return x.LabelNames + } + return nil +} + +type LabelValuesQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName,proto3" json:"label_name,omitempty"` +} + +func (x *LabelValuesQuery) Reset() { + *x = LabelValuesQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelValuesQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelValuesQuery) ProtoMessage() {} + +func (x *LabelValuesQuery) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelValuesQuery.ProtoReflect.Descriptor instead. +func (*LabelValuesQuery) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{11} +} + +func (x *LabelValuesQuery) GetLabelName() string { + if x != nil { + return x.LabelName + } + return "" +} + +type LabelValuesReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *LabelValuesQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + LabelValues []string `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` +} + +func (x *LabelValuesReport) Reset() { + *x = LabelValuesReport{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelValuesReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelValuesReport) ProtoMessage() {} + +func (x *LabelValuesReport) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelValuesReport.ProtoReflect.Descriptor instead. 
+func (*LabelValuesReport) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{12} +} + +func (x *LabelValuesReport) GetQuery() *LabelValuesQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *LabelValuesReport) GetLabelValues() []string { + if x != nil { + return x.LabelValues + } + return nil +} + +type SeriesLabelsQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` +} + +func (x *SeriesLabelsQuery) Reset() { + *x = SeriesLabelsQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SeriesLabelsQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SeriesLabelsQuery) ProtoMessage() {} + +func (x *SeriesLabelsQuery) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SeriesLabelsQuery.ProtoReflect.Descriptor instead. +func (*SeriesLabelsQuery) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{13} +} + +func (x *SeriesLabelsQuery) GetLabelNames() []string { + if x != nil { + return x.LabelNames + } + return nil +} + +type SeriesLabelsReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *SeriesLabelsQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + SeriesLabels []*v11.Labels `protobuf:"bytes,2,rep,name=series_labels,json=seriesLabels,proto3" json:"series_labels,omitempty"` +} + +func (x *SeriesLabelsReport) Reset() { + *x = SeriesLabelsReport{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SeriesLabelsReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SeriesLabelsReport) ProtoMessage() {} + +func (x *SeriesLabelsReport) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SeriesLabelsReport.ProtoReflect.Descriptor instead. 
+func (*SeriesLabelsReport) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{14} +} + +func (x *SeriesLabelsReport) GetQuery() *SeriesLabelsQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *SeriesLabelsReport) GetSeriesLabels() []*v11.Labels { + if x != nil { + return x.SeriesLabels + } + return nil +} + +type TimeSeriesQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Step float64 `protobuf:"fixed64,1,opt,name=step,proto3" json:"step,omitempty"` + GroupBy []string `protobuf:"bytes,2,rep,name=group_by,json=groupBy,proto3" json:"group_by,omitempty"` +} + +func (x *TimeSeriesQuery) Reset() { + *x = TimeSeriesQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesQuery) ProtoMessage() {} + +func (x *TimeSeriesQuery) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesQuery.ProtoReflect.Descriptor instead. +func (*TimeSeriesQuery) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{15} +} + +func (x *TimeSeriesQuery) GetStep() float64 { + if x != nil { + return x.Step + } + return 0 +} + +func (x *TimeSeriesQuery) GetGroupBy() []string { + if x != nil { + return x.GroupBy + } + return nil +} + +type TimeSeriesReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *TimeSeriesQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + TimeSeries []*v11.Series `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` +} + +func (x *TimeSeriesReport) Reset() { + *x = TimeSeriesReport{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeriesReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeriesReport) ProtoMessage() {} + +func (x *TimeSeriesReport) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeriesReport.ProtoReflect.Descriptor instead. 
+func (*TimeSeriesReport) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{16} +} + +func (x *TimeSeriesReport) GetQuery() *TimeSeriesQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *TimeSeriesReport) GetTimeSeries() []*v11.Series { + if x != nil { + return x.TimeSeries + } + return nil +} + +type TreeQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MaxNodes int64 `protobuf:"varint,1,opt,name=max_nodes,json=maxNodes,proto3" json:"max_nodes,omitempty"` +} + +func (x *TreeQuery) Reset() { + *x = TreeQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TreeQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TreeQuery) ProtoMessage() {} + +func (x *TreeQuery) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TreeQuery.ProtoReflect.Descriptor instead. +func (*TreeQuery) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{17} +} + +func (x *TreeQuery) GetMaxNodes() int64 { + if x != nil { + return x.MaxNodes + } + return 0 +} + +type TreeReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *TreeQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + Tree []byte `protobuf:"bytes,2,opt,name=tree,proto3" json:"tree,omitempty"` +} + +func (x *TreeReport) Reset() { + *x = TreeReport{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TreeReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TreeReport) ProtoMessage() {} + +func (x *TreeReport) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TreeReport.ProtoReflect.Descriptor instead. +func (*TreeReport) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{18} +} + +func (x *TreeReport) GetQuery() *TreeQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *TreeReport) GetTree() []byte { + if x != nil { + return x.Tree + } + return nil +} + +type PprofQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MaxNodes int64 `protobuf:"varint,1,opt,name=max_nodes,json=maxNodes,proto3" json:"max_nodes,omitempty"` // TODO(kolesnikovae): Go PGO options. 
+} + +func (x *PprofQuery) Reset() { + *x = PprofQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PprofQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PprofQuery) ProtoMessage() {} + +func (x *PprofQuery) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PprofQuery.ProtoReflect.Descriptor instead. +func (*PprofQuery) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{19} +} + +func (x *PprofQuery) GetMaxNodes() int64 { + if x != nil { + return x.MaxNodes + } + return 0 +} + +type PprofReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query *PprofQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + Pprof []byte `protobuf:"bytes,2,opt,name=pprof,proto3" json:"pprof,omitempty"` +} + +func (x *PprofReport) Reset() { + *x = PprofReport{} + if protoimpl.UnsafeEnabled { + mi := &file_query_v1_query_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PprofReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PprofReport) ProtoMessage() {} + +func (x *PprofReport) ProtoReflect() protoreflect.Message { + mi := &file_query_v1_query_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PprofReport.ProtoReflect.Descriptor instead. 
+func (*PprofReport) Descriptor() ([]byte, []int) { + return file_query_v1_query_proto_rawDescGZIP(), []int{20} +} + +func (x *PprofReport) GetQuery() *PprofQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *PprofReport) GetPprof() []byte { + if x != nil { + return x.Pprof + } + return nil +} + +var File_query_v1_query_proto protoreflect.FileDescriptor + +var file_query_v1_query_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, + 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x6d, 0x65, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x76, + 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x96, 0x01, + 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, + 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x25, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x3b, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x73, 0x22, 0x0f, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x96, 0x02, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, + 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x25, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 
0x79, 0x52, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x32, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, + 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x52, + 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x52, 0x0a, + 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x67, 0x72, + 0x61, 0x70, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x05, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x22, 0x89, 0x03, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x32, 0x0a, 0x0a, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x13, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x3a, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x0b, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0d, 0x73, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x0c, + 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3a, 0x0a, 0x0b, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x0a, 0x74, 0x69, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x04, 0x74, 0x72, 0x65, + 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x51, 
0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x22, 0x75, 0x0a, + 0x0e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2a, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x22, 0x0d, 0x0a, 0x0b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x22, 0x93, 0x03, 0x0a, 0x06, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x35, + 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x65, 0x65, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x04, 0x74, 0x72, 0x65, 0x65, 0x12, 0x2b, 0x0a, 0x05, + 0x70, 0x70, 0x72, 0x6f, 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x05, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x22, 0x64, 0x0a, 0x10, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 
0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x12, 0x2f, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x22, 0x31, 0x0a, 0x10, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x68, 0x0a, 0x11, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x05, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, + 0x34, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x7e, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x35, + 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x22, 0x40, 0x0a, 0x0f, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x74, 0x65, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x73, 0x74, 0x65, 0x70, 0x12, 0x19, 0x0a, 0x08, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x22, 0x76, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, 0x53, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x05, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x31, 0x0a, 0x0b, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, + 0x28, 0x0a, 0x09, 0x54, 0x72, 0x65, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x08, 0x6d, 0x61, 0x78, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x4b, 0x0a, 0x0a, 0x54, 0x72, 0x65, + 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0x29, 0x0a, 0x0a, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x4e, 0x6f, 0x64, 0x65, + 0x73, 0x22, 0x4f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x12, 0x2a, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x70, 0x70, 0x72, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x70, 0x72, + 0x6f, 0x66, 0x2a, 0xa2, 0x01, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x15, 0x0a, 0x11, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x51, 0x55, 0x45, 0x52, 0x59, + 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x10, 0x01, 0x12, 0x16, + 0x0a, 0x12, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x56, 0x41, + 0x4c, 0x55, 0x45, 0x53, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, + 0x53, 0x45, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x10, 0x03, 0x12, + 0x15, 0x0a, 0x11, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x49, 0x45, 0x53, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, + 0x54, 0x52, 0x45, 0x45, 0x10, 0x05, 0x12, 0x0f, 0x0a, 0x0b, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, + 0x50, 0x50, 0x52, 0x4f, 0x46, 0x10, 0x06, 0x2a, 0xaa, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, + 0x0a, 0x12, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4e, + 0x41, 0x4d, 0x45, 0x53, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, + 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x10, 0x02, 0x12, + 0x18, 0x0a, 0x14, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, 0x53, + 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x50, + 0x4f, 0x52, 0x54, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, 
0x53, 0x10, + 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x54, 0x52, 0x45, 0x45, + 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x50, 0x50, 0x52, + 0x4f, 0x46, 0x10, 0x06, 0x32, 0x52, 0x0a, 0x14, 0x51, 0x75, 0x65, 0x72, 0x79, 0x46, 0x72, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3a, 0x0a, 0x05, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, + 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x54, 0x0a, 0x13, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x3d, 0x0a, 0x06, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, + 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9b, + 0x01, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x42, + 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, + 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2f, 0x76, 0x31, 0x3b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x76, 0x31, 0xa2, 0x02, 0x03, + 0x51, 0x58, 0x58, 0xaa, 0x02, 0x08, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x31, 0xca, 0x02, + 0x08, 0x51, 0x75, 0x65, 0x72, 0x79, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x14, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_query_v1_query_proto_rawDescOnce sync.Once + file_query_v1_query_proto_rawDescData = file_query_v1_query_proto_rawDesc +) + +func file_query_v1_query_proto_rawDescGZIP() []byte { + file_query_v1_query_proto_rawDescOnce.Do(func() { + file_query_v1_query_proto_rawDescData = protoimpl.X.CompressGZIP(file_query_v1_query_proto_rawDescData) + }) + return file_query_v1_query_proto_rawDescData +} + +var file_query_v1_query_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_query_v1_query_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_query_v1_query_proto_goTypes = []any{ + (QueryType)(0), // 0: query.v1.QueryType + (ReportType)(0), // 1: query.v1.ReportType + (*QueryRequest)(nil), // 2: query.v1.QueryRequest + (*QueryResponse)(nil), // 3: query.v1.QueryResponse + (*InvokeOptions)(nil), // 4: query.v1.InvokeOptions + (*InvokeRequest)(nil), // 5: query.v1.InvokeRequest + (*QueryPlan)(nil), // 6: query.v1.QueryPlan + (*Query)(nil), // 7: query.v1.Query + (*InvokeResponse)(nil), // 8: query.v1.InvokeResponse + (*Diagnostics)(nil), // 9: query.v1.Diagnostics + (*Report)(nil), // 10: query.v1.Report + (*LabelNamesQuery)(nil), // 11: query.v1.LabelNamesQuery + (*LabelNamesReport)(nil), // 12: query.v1.LabelNamesReport 
+ (*LabelValuesQuery)(nil), // 13: query.v1.LabelValuesQuery + (*LabelValuesReport)(nil), // 14: query.v1.LabelValuesReport + (*SeriesLabelsQuery)(nil), // 15: query.v1.SeriesLabelsQuery + (*SeriesLabelsReport)(nil), // 16: query.v1.SeriesLabelsReport + (*TimeSeriesQuery)(nil), // 17: query.v1.TimeSeriesQuery + (*TimeSeriesReport)(nil), // 18: query.v1.TimeSeriesReport + (*TreeQuery)(nil), // 19: query.v1.TreeQuery + (*TreeReport)(nil), // 20: query.v1.TreeReport + (*PprofQuery)(nil), // 21: query.v1.PprofQuery + (*PprofReport)(nil), // 22: query.v1.PprofReport + (*v1.BlockMeta)(nil), // 23: metastore.v1.BlockMeta + (*v11.Labels)(nil), // 24: types.v1.Labels + (*v11.Series)(nil), // 25: types.v1.Series +} +var file_query_v1_query_proto_depIdxs = []int32{ + 7, // 0: query.v1.QueryRequest.query:type_name -> query.v1.Query + 10, // 1: query.v1.QueryResponse.reports:type_name -> query.v1.Report + 7, // 2: query.v1.InvokeRequest.query:type_name -> query.v1.Query + 6, // 3: query.v1.InvokeRequest.query_plan:type_name -> query.v1.QueryPlan + 4, // 4: query.v1.InvokeRequest.options:type_name -> query.v1.InvokeOptions + 23, // 5: query.v1.QueryPlan.blocks:type_name -> metastore.v1.BlockMeta + 0, // 6: query.v1.Query.query_type:type_name -> query.v1.QueryType + 11, // 7: query.v1.Query.label_names:type_name -> query.v1.LabelNamesQuery + 13, // 8: query.v1.Query.label_values:type_name -> query.v1.LabelValuesQuery + 15, // 9: query.v1.Query.series_labels:type_name -> query.v1.SeriesLabelsQuery + 17, // 10: query.v1.Query.time_series:type_name -> query.v1.TimeSeriesQuery + 19, // 11: query.v1.Query.tree:type_name -> query.v1.TreeQuery + 21, // 12: query.v1.Query.pprof:type_name -> query.v1.PprofQuery + 10, // 13: query.v1.InvokeResponse.reports:type_name -> query.v1.Report + 9, // 14: query.v1.InvokeResponse.diagnostics:type_name -> query.v1.Diagnostics + 1, // 15: query.v1.Report.report_type:type_name -> query.v1.ReportType + 12, // 16: query.v1.Report.label_names:type_name -> query.v1.LabelNamesReport + 14, // 17: query.v1.Report.label_values:type_name -> query.v1.LabelValuesReport + 16, // 18: query.v1.Report.series_labels:type_name -> query.v1.SeriesLabelsReport + 18, // 19: query.v1.Report.time_series:type_name -> query.v1.TimeSeriesReport + 20, // 20: query.v1.Report.tree:type_name -> query.v1.TreeReport + 22, // 21: query.v1.Report.pprof:type_name -> query.v1.PprofReport + 11, // 22: query.v1.LabelNamesReport.query:type_name -> query.v1.LabelNamesQuery + 13, // 23: query.v1.LabelValuesReport.query:type_name -> query.v1.LabelValuesQuery + 15, // 24: query.v1.SeriesLabelsReport.query:type_name -> query.v1.SeriesLabelsQuery + 24, // 25: query.v1.SeriesLabelsReport.series_labels:type_name -> types.v1.Labels + 17, // 26: query.v1.TimeSeriesReport.query:type_name -> query.v1.TimeSeriesQuery + 25, // 27: query.v1.TimeSeriesReport.time_series:type_name -> types.v1.Series + 19, // 28: query.v1.TreeReport.query:type_name -> query.v1.TreeQuery + 21, // 29: query.v1.PprofReport.query:type_name -> query.v1.PprofQuery + 2, // 30: query.v1.QueryFrontendService.Query:input_type -> query.v1.QueryRequest + 5, // 31: query.v1.QueryBackendService.Invoke:input_type -> query.v1.InvokeRequest + 3, // 32: query.v1.QueryFrontendService.Query:output_type -> query.v1.QueryResponse + 8, // 33: query.v1.QueryBackendService.Invoke:output_type -> query.v1.InvokeResponse + 32, // [32:34] is the sub-list for method output_type + 30, // [30:32] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension 
type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_query_v1_query_proto_init() } +func file_query_v1_query_proto_init() { + if File_query_v1_query_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_query_v1_query_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*QueryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*QueryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*InvokeOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*InvokeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*QueryPlan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*Query); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*InvokeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*Diagnostics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*Report); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*LabelNamesQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*LabelNamesReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*LabelValuesQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*LabelValuesReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*SeriesLabelsQuery); i 
{ + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[14].Exporter = func(v any, i int) any { + switch v := v.(*SeriesLabelsReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[15].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[16].Exporter = func(v any, i int) any { + switch v := v.(*TimeSeriesReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[17].Exporter = func(v any, i int) any { + switch v := v.(*TreeQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[18].Exporter = func(v any, i int) any { + switch v := v.(*TreeReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[19].Exporter = func(v any, i int) any { + switch v := v.(*PprofQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_v1_query_proto_msgTypes[20].Exporter = func(v any, i int) any { + switch v := v.(*PprofReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_query_v1_query_proto_rawDesc, + NumEnums: 2, + NumMessages: 21, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_query_v1_query_proto_goTypes, + DependencyIndexes: file_query_v1_query_proto_depIdxs, + EnumInfos: file_query_v1_query_proto_enumTypes, + MessageInfos: file_query_v1_query_proto_msgTypes, + }.Build() + File_query_v1_query_proto = out.File + file_query_v1_query_proto_rawDesc = nil + file_query_v1_query_proto_goTypes = nil + file_query_v1_query_proto_depIdxs = nil +} diff --git a/api/gen/proto/go/querybackend/v1/querybackend_vtproto.pb.go b/api/gen/proto/go/query/v1/query_vtproto.pb.go similarity index 80% rename from api/gen/proto/go/querybackend/v1/querybackend_vtproto.pb.go rename to api/gen/proto/go/query/v1/query_vtproto.pb.go index 1e5ac09fe1..2144a633fc 100644 --- a/api/gen/proto/go/querybackend/v1/querybackend_vtproto.pb.go +++ b/api/gen/proto/go/query/v1/query_vtproto.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
// protoc-gen-go-vtproto version: v0.6.0 -// source: querybackend/v1/querybackend.proto +// source: query/v1/query.proto -package querybackendv1 +package queryv1 import ( context "context" @@ -27,6 +27,55 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *QueryRequest) CloneVT() *QueryRequest { + if m == nil { + return (*QueryRequest)(nil) + } + r := new(QueryRequest) + r.StartTime = m.StartTime + r.EndTime = m.EndTime + r.LabelSelector = m.LabelSelector + if rhs := m.Query; rhs != nil { + tmpContainer := make([]*Query, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Query = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *QueryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *QueryResponse) CloneVT() *QueryResponse { + if m == nil { + return (*QueryResponse)(nil) + } + r := new(QueryResponse) + if rhs := m.Reports; rhs != nil { + tmpContainer := make([]*Report, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Reports = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *QueryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *InvokeOptions) CloneVT() *InvokeOptions { if m == nil { return (*InvokeOptions)(nil) @@ -119,6 +168,7 @@ func (m *Query) CloneVT() *Query { r.SeriesLabels = m.SeriesLabels.CloneVT() r.TimeSeries = m.TimeSeries.CloneVT() r.Tree = m.Tree.CloneVT() + r.Pprof = m.Pprof.CloneVT() if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -181,6 +231,7 @@ func (m *Report) CloneVT() *Report { r.SeriesLabels = m.SeriesLabels.CloneVT() r.TimeSeries = m.TimeSeries.CloneVT() r.Tree = m.Tree.CloneVT() + r.Pprof = m.Pprof.CloneVT() if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -329,10 +380,6 @@ func (m *TimeSeriesQuery) CloneVT() *TimeSeriesQuery { copy(tmpContainer, rhs) r.GroupBy = tmpContainer } - if rhs := m.Aggregation; rhs != nil { - tmpVal := *rhs - r.Aggregation = &tmpVal - } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -411,6 +458,120 @@ func (m *TreeReport) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *PprofQuery) CloneVT() *PprofQuery { + if m == nil { + return (*PprofQuery)(nil) + } + r := new(PprofQuery) + r.MaxNodes = m.MaxNodes + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PprofQuery) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PprofReport) CloneVT() *PprofReport { + if m == nil { + return (*PprofReport)(nil) + } + r := new(PprofReport) + r.Query = m.Query.CloneVT() + if rhs := m.Pprof; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Pprof = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PprofReport) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (this *QueryRequest) EqualVT(that *QueryRequest) bool { + if this == that { + return true + } else if 
this == nil || that == nil { + return false + } + if this.StartTime != that.StartTime { + return false + } + if this.EndTime != that.EndTime { + return false + } + if this.LabelSelector != that.LabelSelector { + return false + } + if len(this.Query) != len(that.Query) { + return false + } + for i, vx := range this.Query { + vy := that.Query[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &Query{} + } + if q == nil { + q = &Query{} + } + if !p.EqualVT(q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *QueryRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*QueryRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *QueryResponse) EqualVT(that *QueryResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Reports) != len(that.Reports) { + return false + } + for i, vx := range this.Reports { + vy := that.Reports[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &Report{} + } + if q == nil { + q = &Report{} + } + if !p.EqualVT(q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *QueryResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*QueryResponse) + if !ok { + return false + } + return this.EqualVT(that) +} func (this *InvokeOptions) EqualVT(that *InvokeOptions) bool { if this == that { return true @@ -554,6 +715,9 @@ func (this *Query) EqualVT(that *Query) bool { if !this.Tree.EqualVT(that.Tree) { return false } + if !this.Pprof.EqualVT(that.Pprof) { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -640,6 +804,9 @@ func (this *Report) EqualVT(that *Report) bool { if !this.Tree.EqualVT(that.Tree) { return false } + if !this.Pprof.EqualVT(that.Pprof) { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -824,9 +991,6 @@ func (this *TimeSeriesQuery) EqualVT(that *TimeSeriesQuery) bool { return false } } - if p, q := this.Aggregation, that.Aggregation; (p == nil && q != nil) || (p != nil && (q == nil || *p != *q)) { - return false - } return string(this.unknownFields) == string(that.unknownFields) } @@ -918,12 +1082,139 @@ func (this *TreeReport) EqualMessageVT(thatMsg proto.Message) bool { } return this.EqualVT(that) } +func (this *PprofQuery) EqualVT(that *PprofQuery) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.MaxNodes != that.MaxNodes { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *PprofQuery) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*PprofQuery) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *PprofReport) EqualVT(that *PprofReport) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.Query.EqualVT(that.Query) { + return false + } + if string(this.Pprof) != string(that.Pprof) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *PprofReport) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*PprofReport) + if !ok { + return false + } + return this.EqualVT(that) +} // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. 
// Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +// QueryFrontendServiceClient is the client API for QueryFrontendService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type QueryFrontendServiceClient interface { + Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) +} + +type queryFrontendServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewQueryFrontendServiceClient(cc grpc.ClientConnInterface) QueryFrontendServiceClient { + return &queryFrontendServiceClient{cc} +} + +func (c *queryFrontendServiceClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResponse, error) { + out := new(QueryResponse) + err := c.cc.Invoke(ctx, "/query.v1.QueryFrontendService/Query", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryFrontendServiceServer is the server API for QueryFrontendService service. +// All implementations must embed UnimplementedQueryFrontendServiceServer +// for forward compatibility +type QueryFrontendServiceServer interface { + Query(context.Context, *QueryRequest) (*QueryResponse, error) + mustEmbedUnimplementedQueryFrontendServiceServer() +} + +// UnimplementedQueryFrontendServiceServer must be embedded to have forward compatible implementations. +type UnimplementedQueryFrontendServiceServer struct { +} + +func (UnimplementedQueryFrontendServiceServer) Query(context.Context, *QueryRequest) (*QueryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") +} +func (UnimplementedQueryFrontendServiceServer) mustEmbedUnimplementedQueryFrontendServiceServer() {} + +// UnsafeQueryFrontendServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to QueryFrontendServiceServer will +// result in compilation errors. +type UnsafeQueryFrontendServiceServer interface { + mustEmbedUnimplementedQueryFrontendServiceServer() +} + +func RegisterQueryFrontendServiceServer(s grpc.ServiceRegistrar, srv QueryFrontendServiceServer) { + s.RegisterService(&QueryFrontendService_ServiceDesc, srv) +} + +func _QueryFrontendService_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryFrontendServiceServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/query.v1.QueryFrontendService/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryFrontendServiceServer).Query(ctx, req.(*QueryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// QueryFrontendService_ServiceDesc is the grpc.ServiceDesc for QueryFrontendService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var QueryFrontendService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "query.v1.QueryFrontendService", + HandlerType: (*QueryFrontendServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Query", + Handler: _QueryFrontendService_Query_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "query/v1/query.proto", +} + // QueryBackendServiceClient is the client API for QueryBackendService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -941,7 +1232,7 @@ func NewQueryBackendServiceClient(cc grpc.ClientConnInterface) QueryBackendServi func (c *queryBackendServiceClient) Invoke(ctx context.Context, in *InvokeRequest, opts ...grpc.CallOption) (*InvokeResponse, error) { out := new(InvokeResponse) - err := c.cc.Invoke(ctx, "/querybackend.v1.QueryBackendService/Invoke", in, out, opts...) + err := c.cc.Invoke(ctx, "/query.v1.QueryBackendService/Invoke", in, out, opts...) if err != nil { return nil, err } @@ -986,7 +1277,7 @@ func _QueryBackendService_Invoke_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/querybackend.v1.QueryBackendService/Invoke", + FullMethod: "/query.v1.QueryBackendService/Invoke", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(QueryBackendServiceServer).Invoke(ctx, req.(*InvokeRequest)) @@ -998,7 +1289,7 @@ func _QueryBackendService_Invoke_Handler(srv interface{}, ctx context.Context, d // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) var QueryBackendService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "querybackend.v1.QueryBackendService", + ServiceName: "query.v1.QueryBackendService", HandlerType: (*QueryBackendServiceServer)(nil), Methods: []grpc.MethodDesc{ { @@ -1007,10 +1298,10 @@ var QueryBackendService_ServiceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "querybackend/v1/querybackend.proto", + Metadata: "query/v1/query.proto", } -func (m *InvokeOptions) MarshalVT() (dAtA []byte, err error) { +func (m *QueryRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1023,12 +1314,12 @@ func (m *InvokeOptions) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InvokeOptions) MarshalToVT(dAtA []byte) (int, error) { +func (m *QueryRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InvokeOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *QueryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1040,21 +1331,128 @@ func (m *InvokeOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - return len(dAtA) - i, nil -} - -func (m *InvokeRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + if len(m.Query) > 0 { + for iNdEx := len(m.Query) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Query[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } } - return dAtA[:n], nil -} + if len(m.LabelSelector) > 0 { + i -= len(m.LabelSelector) + copy(dAtA[i:], m.LabelSelector) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LabelSelector))) + i-- + dAtA[i] = 0x1a + } + if m.EndTime != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.EndTime)) + i-- + dAtA[i] = 0x10 + } + if m.StartTime != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.StartTime)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *QueryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Reports) > 0 { + for iNdEx := len(m.Reports) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Reports[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *InvokeOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InvokeOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InvokeOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *InvokeRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} func (m *InvokeRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() @@ -1241,6 +1639,16 @@ func (m *Query) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Pprof != nil { + size, err := m.Pprof.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } if m.Tree != nil { size, err := m.Tree.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -1417,6 +1825,16 @@ func (m *Report) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Pprof != nil { + size, err := m.Pprof.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } if m.Tree != nil { size, err := m.Tree.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -1791,11 +2209,6 @@ func (m *TimeSeriesQuery) MarshalToSizedBufferVT(dAtA []byte) 
(int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Aggregation != nil { - i = protohelpers.EncodeVarint(dAtA, i, uint64(*m.Aggregation)) - i-- - dAtA[i] = 0x18 - } if len(m.GroupBy) > 0 { for iNdEx := len(m.GroupBy) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.GroupBy[iNdEx]) @@ -1969,6 +2382,136 @@ func (m *TreeReport) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PprofQuery) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PprofQuery) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PprofQuery) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.MaxNodes != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxNodes)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PprofReport) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PprofReport) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PprofReport) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Pprof) > 0 { + i -= len(m.Pprof) + copy(dAtA[i:], m.Pprof) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Pprof))) + i-- + dAtA[i] = 0x12 + } + if m.Query != nil { + size, err := m.Query.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTime != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.StartTime)) + } + if m.EndTime != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.EndTime)) + } + l = len(m.LabelSelector) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Query) > 0 { + for _, e := range m.Query { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *QueryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Reports) > 0 { + for _, e := range m.Reports { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + func (m *InvokeOptions) SizeVT() (n int) { if m == nil { return 0 @@ -2077,6 +2620,10 @@ func (m *Query) SizeVT() (n int) { l = m.Tree.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if m.Pprof != nil { + l = m.Pprof.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += len(m.unknownFields) return n } @@ -2140,6 +2687,10 @@ func (m *Report) SizeVT() (n int) { l = m.Tree.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if 
m.Pprof != nil { + l = m.Pprof.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += len(m.unknownFields) return n } @@ -2265,9 +2816,6 @@ func (m *TimeSeriesQuery) SizeVT() (n int) { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } } - if m.Aggregation != nil { - n += 1 + protohelpers.SizeOfVarint(uint64(*m.Aggregation)) - } n += len(m.unknownFields) return n } @@ -2329,13 +2877,284 @@ func (m *TreeReport) SizeVT() (n int) { return n } -func (m *InvokeOptions) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { +func (m *PprofQuery) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxNodes != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxNodes)) + } + n += len(m.unknownFields) + return n +} + +func (m *PprofReport) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Pprof) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *QueryRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + m.StartTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTime", wireType) + } + m.EndTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query, &Query{}) + if err := m.Query[len(m.Query)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reports = append(m.Reports, &Report{}) + if err := m.Reports[len(m.Reports)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InvokeOptions) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow } @@ -2924,13 +3743,49 @@ func (m *Query) UnmarshalVT(dAtA []byte) error { if m.LabelValues == nil { m.LabelValues = &LabelValuesQuery{} } - if err := m.LabelValues.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LabelValues.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SeriesLabels == nil { + m.SeriesLabels = &SeriesLabelsQuery{} + } + if err := m.SeriesLabels.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SeriesLabels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TimeSeries", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2957,16 +3812,16 @@ func (m *Query) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SeriesLabels == nil { - m.SeriesLabels = &SeriesLabelsQuery{} + if m.TimeSeries == nil { + m.TimeSeries = &TimeSeriesQuery{} } - if err := m.SeriesLabels.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TimeSeries.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeSeries", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tree", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2993,16 +3848,16 @@ func (m *Query) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TimeSeries == nil { - m.TimeSeries = &TimeSeriesQuery{} + if m.Tree == nil { + m.Tree = &TreeQuery{} } - if err := m.TimeSeries.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Tree.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tree", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pprof", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3029,10 +3884,10 @@ func (m *Query) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Tree == nil { - m.Tree = &TreeQuery{} + if m.Pprof == nil { + m.Pprof = &PprofQuery{} } - if err := m.Tree.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Pprof.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3458,6 +4313,42 @@ func (m *Report) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Pprof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pprof == nil { + m.Pprof = &PprofReport{} + } + if err := m.Pprof.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -4136,26 +5027,6 @@ func (m *TimeSeriesQuery) UnmarshalVT(dAtA []byte) error { } m.GroupBy = append(m.GroupBy, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Aggregation", wireType) - } - var v v11.TimeSeriesAggregationType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= v11.TimeSeriesAggregationType(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Aggregation = &v default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -4498,3 +5369,194 @@ func (m *TreeReport) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *PprofQuery) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PprofQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PprofQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxNodes", wireType) + } + m.MaxNodes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxNodes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PprofReport) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PprofReport: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PprofReport: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Query == nil { + m.Query = &PprofQuery{} + } + if err := m.Query.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pprof", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pprof = append(m.Pprof[:0], dAtA[iNdEx:postIndex]...) + if m.Pprof == nil { + m.Pprof = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/api/gen/proto/go/query/v1/queryv1connect/query.connect.go b/api/gen/proto/go/query/v1/queryv1connect/query.connect.go new file mode 100644 index 0000000000..032bfcec88 --- /dev/null +++ b/api/gen/proto/go/query/v1/queryv1connect/query.connect.go @@ -0,0 +1,188 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: query/v1/query.proto + +package queryv1connect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + v1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. 
You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // QueryFrontendServiceName is the fully-qualified name of the QueryFrontendService service. + QueryFrontendServiceName = "query.v1.QueryFrontendService" + // QueryBackendServiceName is the fully-qualified name of the QueryBackendService service. + QueryBackendServiceName = "query.v1.QueryBackendService" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // QueryFrontendServiceQueryProcedure is the fully-qualified name of the QueryFrontendService's + // Query RPC. + QueryFrontendServiceQueryProcedure = "/query.v1.QueryFrontendService/Query" + // QueryBackendServiceInvokeProcedure is the fully-qualified name of the QueryBackendService's + // Invoke RPC. + QueryBackendServiceInvokeProcedure = "/query.v1.QueryBackendService/Invoke" +) + +// These variables are the protoreflect.Descriptor objects for the RPCs defined in this package. +var ( + queryFrontendServiceServiceDescriptor = v1.File_query_v1_query_proto.Services().ByName("QueryFrontendService") + queryFrontendServiceQueryMethodDescriptor = queryFrontendServiceServiceDescriptor.Methods().ByName("Query") + queryBackendServiceServiceDescriptor = v1.File_query_v1_query_proto.Services().ByName("QueryBackendService") + queryBackendServiceInvokeMethodDescriptor = queryBackendServiceServiceDescriptor.Methods().ByName("Invoke") +) + +// QueryFrontendServiceClient is a client for the query.v1.QueryFrontendService service. +type QueryFrontendServiceClient interface { + Query(context.Context, *connect.Request[v1.QueryRequest]) (*connect.Response[v1.QueryResponse], error) +} + +// NewQueryFrontendServiceClient constructs a client for the query.v1.QueryFrontendService service. +// By default, it uses the Connect protocol with the binary Protobuf Codec, asks for gzipped +// responses, and sends uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the +// connect.WithGRPC() or connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). +func NewQueryFrontendServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) QueryFrontendServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + return &queryFrontendServiceClient{ + query: connect.NewClient[v1.QueryRequest, v1.QueryResponse]( + httpClient, + baseURL+QueryFrontendServiceQueryProcedure, + connect.WithSchema(queryFrontendServiceQueryMethodDescriptor), + connect.WithClientOptions(opts...), + ), + } +} + +// queryFrontendServiceClient implements QueryFrontendServiceClient. +type queryFrontendServiceClient struct { + query *connect.Client[v1.QueryRequest, v1.QueryResponse] +} + +// Query calls query.v1.QueryFrontendService.Query. 
+func (c *queryFrontendServiceClient) Query(ctx context.Context, req *connect.Request[v1.QueryRequest]) (*connect.Response[v1.QueryResponse], error) { + return c.query.CallUnary(ctx, req) +} + +// QueryFrontendServiceHandler is an implementation of the query.v1.QueryFrontendService service. +type QueryFrontendServiceHandler interface { + Query(context.Context, *connect.Request[v1.QueryRequest]) (*connect.Response[v1.QueryResponse], error) +} + +// NewQueryFrontendServiceHandler builds an HTTP handler from the service implementation. It returns +// the path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. +func NewQueryFrontendServiceHandler(svc QueryFrontendServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + queryFrontendServiceQueryHandler := connect.NewUnaryHandler( + QueryFrontendServiceQueryProcedure, + svc.Query, + connect.WithSchema(queryFrontendServiceQueryMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + return "/query.v1.QueryFrontendService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case QueryFrontendServiceQueryProcedure: + queryFrontendServiceQueryHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedQueryFrontendServiceHandler returns CodeUnimplemented from all methods. +type UnimplementedQueryFrontendServiceHandler struct{} + +func (UnimplementedQueryFrontendServiceHandler) Query(context.Context, *connect.Request[v1.QueryRequest]) (*connect.Response[v1.QueryResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("query.v1.QueryFrontendService.Query is not implemented")) +} + +// QueryBackendServiceClient is a client for the query.v1.QueryBackendService service. +type QueryBackendServiceClient interface { + Invoke(context.Context, *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) +} + +// NewQueryBackendServiceClient constructs a client for the query.v1.QueryBackendService service. By +// default, it uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, +// and sends uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the +// connect.WithGRPC() or connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). +func NewQueryBackendServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) QueryBackendServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + return &queryBackendServiceClient{ + invoke: connect.NewClient[v1.InvokeRequest, v1.InvokeResponse]( + httpClient, + baseURL+QueryBackendServiceInvokeProcedure, + connect.WithSchema(queryBackendServiceInvokeMethodDescriptor), + connect.WithClientOptions(opts...), + ), + } +} + +// queryBackendServiceClient implements QueryBackendServiceClient. +type queryBackendServiceClient struct { + invoke *connect.Client[v1.InvokeRequest, v1.InvokeResponse] +} + +// Invoke calls query.v1.QueryBackendService.Invoke. 
+func (c *queryBackendServiceClient) Invoke(ctx context.Context, req *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) { + return c.invoke.CallUnary(ctx, req) +} + +// QueryBackendServiceHandler is an implementation of the query.v1.QueryBackendService service. +type QueryBackendServiceHandler interface { + Invoke(context.Context, *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) +} + +// NewQueryBackendServiceHandler builds an HTTP handler from the service implementation. It returns +// the path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. +func NewQueryBackendServiceHandler(svc QueryBackendServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + queryBackendServiceInvokeHandler := connect.NewUnaryHandler( + QueryBackendServiceInvokeProcedure, + svc.Invoke, + connect.WithSchema(queryBackendServiceInvokeMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + return "/query.v1.QueryBackendService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case QueryBackendServiceInvokeProcedure: + queryBackendServiceInvokeHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedQueryBackendServiceHandler returns CodeUnimplemented from all methods. +type UnimplementedQueryBackendServiceHandler struct{} + +func (UnimplementedQueryBackendServiceHandler) Invoke(context.Context, *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("query.v1.QueryBackendService.Invoke is not implemented")) +} diff --git a/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.mux.go b/api/gen/proto/go/query/v1/queryv1connect/query.connect.mux.go similarity index 60% rename from api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.mux.go rename to api/gen/proto/go/query/v1/queryv1connect/query.connect.mux.go index 2a58ad96eb..f57faef705 100644 --- a/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.mux.go +++ b/api/gen/proto/go/query/v1/queryv1connect/query.connect.mux.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-connect-go-mux. DO NOT EDIT. // -// Source: querybackend/v1/querybackend.proto +// Source: query/v1/query.proto -package querybackendv1connect +package queryv1connect import ( connect "connectrpc.com/connect" @@ -16,11 +16,21 @@ import ( // version compiled into your binary. const _ = connect.IsAtLeastVersion0_1_0 +// RegisterQueryFrontendServiceHandler register an HTTP handler to a mux.Router from the service +// implementation. +func RegisterQueryFrontendServiceHandler(mux *mux.Router, svc QueryFrontendServiceHandler, opts ...connect.HandlerOption) { + mux.Handle("/query.v1.QueryFrontendService/Query", connect.NewUnaryHandler( + "/query.v1.QueryFrontendService/Query", + svc.Query, + opts..., + )) +} + // RegisterQueryBackendServiceHandler register an HTTP handler to a mux.Router from the service // implementation. 
func RegisterQueryBackendServiceHandler(mux *mux.Router, svc QueryBackendServiceHandler, opts ...connect.HandlerOption) { - mux.Handle("/querybackend.v1.QueryBackendService/Invoke", connect.NewUnaryHandler( - "/querybackend.v1.QueryBackendService/Invoke", + mux.Handle("/query.v1.QueryBackendService/Invoke", connect.NewUnaryHandler( + "/query.v1.QueryBackendService/Invoke", svc.Invoke, opts..., )) diff --git a/api/gen/proto/go/querybackend/v1/querybackend.pb.go b/api/gen/proto/go/querybackend/v1/querybackend.pb.go deleted file mode 100644 index fdb37dbfc7..0000000000 --- a/api/gen/proto/go/querybackend/v1/querybackend.pb.go +++ /dev/null @@ -1,1627 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.34.2 -// protoc (unknown) -// source: querybackend/v1/querybackend.proto - -package querybackendv1 - -import ( - _ "github.com/grafana/pyroscope/api/gen/proto/go/google/v1" - v1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - v11 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type QueryType int32 - -const ( - QueryType_QUERY_UNSPECIFIED QueryType = 0 - QueryType_QUERY_LABEL_NAMES QueryType = 1 - QueryType_QUERY_LABEL_VALUES QueryType = 2 - QueryType_QUERY_SERIES_LABELS QueryType = 3 - QueryType_QUERY_TIME_SERIES QueryType = 4 - QueryType_QUERY_TREE QueryType = 5 -) - -// Enum value maps for QueryType. -var ( - QueryType_name = map[int32]string{ - 0: "QUERY_UNSPECIFIED", - 1: "QUERY_LABEL_NAMES", - 2: "QUERY_LABEL_VALUES", - 3: "QUERY_SERIES_LABELS", - 4: "QUERY_TIME_SERIES", - 5: "QUERY_TREE", - } - QueryType_value = map[string]int32{ - "QUERY_UNSPECIFIED": 0, - "QUERY_LABEL_NAMES": 1, - "QUERY_LABEL_VALUES": 2, - "QUERY_SERIES_LABELS": 3, - "QUERY_TIME_SERIES": 4, - "QUERY_TREE": 5, - } -) - -func (x QueryType) Enum() *QueryType { - p := new(QueryType) - *p = x - return p -} - -func (x QueryType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (QueryType) Descriptor() protoreflect.EnumDescriptor { - return file_querybackend_v1_querybackend_proto_enumTypes[0].Descriptor() -} - -func (QueryType) Type() protoreflect.EnumType { - return &file_querybackend_v1_querybackend_proto_enumTypes[0] -} - -func (x QueryType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use QueryType.Descriptor instead. -func (QueryType) EnumDescriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{0} -} - -type ReportType int32 - -const ( - ReportType_REPORT_UNSPECIFIED ReportType = 0 - ReportType_REPORT_LABEL_NAMES ReportType = 1 - ReportType_REPORT_LABEL_VALUES ReportType = 2 - ReportType_REPORT_SERIES_LABELS ReportType = 3 - ReportType_REPORT_TIME_SERIES ReportType = 4 - ReportType_REPORT_TREE ReportType = 5 -) - -// Enum value maps for ReportType. 
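For reviewers following the querybackend/v1 → query/v1 rename above, here is a minimal, self-contained sketch of how the regenerated Connect client and handler might be wired up. It is not part of the generated diff; the address, tenant value, and backend implementation are hypothetical placeholders.

package main

import (
	"context"
	"log"
	"net/http"

	"connectrpc.com/connect"
	gorillamux "github.com/gorilla/mux"

	queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1"
	"github.com/grafana/pyroscope/api/gen/proto/go/query/v1/queryv1connect"
)

// backend is a placeholder service implementation; embedding the generated
// Unimplemented handler keeps it compiling while returning CodeUnimplemented.
type backend struct {
	queryv1connect.UnimplementedQueryBackendServiceHandler
}

func main() {
	// Client side: construct a Connect client against a hypothetical address and
	// call the renamed query.v1.QueryBackendService/Invoke procedure.
	client := queryv1connect.NewQueryBackendServiceClient(http.DefaultClient, "http://localhost:4040")
	resp, err := client.Invoke(context.Background(), connect.NewRequest(&queryv1.InvokeRequest{
		Tenant:    []string{"example-tenant"}, // placeholder values; query and plan fields elided
		StartTime: 0,
		EndTime:   1,
	}))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("received %d report(s)", len(resp.Msg.Reports))

	// Server side: mount the generated handler on the path it returns.
	path, handler := queryv1connect.NewQueryBackendServiceHandler(backend{})
	httpMux := http.NewServeMux()
	httpMux.Handle(path, handler)

	// The protoc-gen-connect-go-mux variant registers the same procedure on a
	// gorilla/mux router instead.
	router := gorillamux.NewRouter()
	queryv1connect.RegisterQueryBackendServiceHandler(router, backend{})
}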
-var ( - ReportType_name = map[int32]string{ - 0: "REPORT_UNSPECIFIED", - 1: "REPORT_LABEL_NAMES", - 2: "REPORT_LABEL_VALUES", - 3: "REPORT_SERIES_LABELS", - 4: "REPORT_TIME_SERIES", - 5: "REPORT_TREE", - } - ReportType_value = map[string]int32{ - "REPORT_UNSPECIFIED": 0, - "REPORT_LABEL_NAMES": 1, - "REPORT_LABEL_VALUES": 2, - "REPORT_SERIES_LABELS": 3, - "REPORT_TIME_SERIES": 4, - "REPORT_TREE": 5, - } -) - -func (x ReportType) Enum() *ReportType { - p := new(ReportType) - *p = x - return p -} - -func (x ReportType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ReportType) Descriptor() protoreflect.EnumDescriptor { - return file_querybackend_v1_querybackend_proto_enumTypes[1].Descriptor() -} - -func (ReportType) Type() protoreflect.EnumType { - return &file_querybackend_v1_querybackend_proto_enumTypes[1] -} - -func (x ReportType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ReportType.Descriptor instead. -func (ReportType) EnumDescriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{1} -} - -type InvokeOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *InvokeOptions) Reset() { - *x = InvokeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InvokeOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InvokeOptions) ProtoMessage() {} - -func (x *InvokeOptions) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InvokeOptions.ProtoReflect.Descriptor instead. 
-func (*InvokeOptions) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{0} -} - -type InvokeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Tenant []string `protobuf:"bytes,1,rep,name=tenant,proto3" json:"tenant,omitempty"` - StartTime int64 `protobuf:"varint,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - EndTime int64 `protobuf:"varint,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - LabelSelector string `protobuf:"bytes,4,opt,name=label_selector,json=labelSelector,proto3" json:"label_selector,omitempty"` - Query []*Query `protobuf:"bytes,5,rep,name=query,proto3" json:"query,omitempty"` - QueryPlan *QueryPlan `protobuf:"bytes,6,opt,name=query_plan,json=queryPlan,proto3" json:"query_plan,omitempty"` - Options *InvokeOptions `protobuf:"bytes,7,opt,name=options,proto3" json:"options,omitempty"` -} - -func (x *InvokeRequest) Reset() { - *x = InvokeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InvokeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InvokeRequest) ProtoMessage() {} - -func (x *InvokeRequest) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InvokeRequest.ProtoReflect.Descriptor instead. -func (*InvokeRequest) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{1} -} - -func (x *InvokeRequest) GetTenant() []string { - if x != nil { - return x.Tenant - } - return nil -} - -func (x *InvokeRequest) GetStartTime() int64 { - if x != nil { - return x.StartTime - } - return 0 -} - -func (x *InvokeRequest) GetEndTime() int64 { - if x != nil { - return x.EndTime - } - return 0 -} - -func (x *InvokeRequest) GetLabelSelector() string { - if x != nil { - return x.LabelSelector - } - return "" -} - -func (x *InvokeRequest) GetQuery() []*Query { - if x != nil { - return x.Query - } - return nil -} - -func (x *InvokeRequest) GetQueryPlan() *QueryPlan { - if x != nil { - return x.QueryPlan - } - return nil -} - -func (x *InvokeRequest) GetOptions() *InvokeOptions { - if x != nil { - return x.Options - } - return nil -} - -// Query plan is represented by a DAG, where each node -// might be either "merge" or "read" (leaves). Each node -// references a range: merge nodes refer to other nodes, -// while read nodes refer to the blocks. -type QueryPlan struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Each node is encoded with 3 values: - // - node type: 0 - read, 1 - merge; - // - range offset; - // - range length. - Graph []uint32 `protobuf:"varint,1,rep,packed,name=graph,proto3" json:"graph,omitempty"` - // The blocks matching the query. 
- Blocks []*v1.BlockMeta `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` -} - -func (x *QueryPlan) Reset() { - *x = QueryPlan{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryPlan) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryPlan) ProtoMessage() {} - -func (x *QueryPlan) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryPlan.ProtoReflect.Descriptor instead. -func (*QueryPlan) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{2} -} - -func (x *QueryPlan) GetGraph() []uint32 { - if x != nil { - return x.Graph - } - return nil -} - -func (x *QueryPlan) GetBlocks() []*v1.BlockMeta { - if x != nil { - return x.Blocks - } - return nil -} - -type Query struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - QueryType QueryType `protobuf:"varint,1,opt,name=query_type,json=queryType,proto3,enum=querybackend.v1.QueryType" json:"query_type,omitempty"` - // Exactly one of the following fields should be set, - // depending on the query type. - LabelNames *LabelNamesQuery `protobuf:"bytes,2,opt,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` - LabelValues *LabelValuesQuery `protobuf:"bytes,3,opt,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` - SeriesLabels *SeriesLabelsQuery `protobuf:"bytes,4,opt,name=series_labels,json=seriesLabels,proto3" json:"series_labels,omitempty"` - TimeSeries *TimeSeriesQuery `protobuf:"bytes,5,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` - Tree *TreeQuery `protobuf:"bytes,6,opt,name=tree,proto3" json:"tree,omitempty"` -} - -func (x *Query) Reset() { - *x = Query{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Query) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Query) ProtoMessage() {} - -func (x *Query) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Query.ProtoReflect.Descriptor instead. 
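The QueryPlan encoding described in the comment above appears to carry over to query/v1 under the rename; a tiny worked example may make the flat triplet layout easier to follow. This is an illustrative reading of the generated comment, not code from the diff, it assumes the message shape is unchanged by the rename, and the block entries are placeholders.

package main

import (
	"fmt"

	metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1"
	queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1"
)

func main() {
	// Hypothetical plan: one merge root fanning out to two read leaves.
	// Each node is a (type, offset, length) triplet, type 0 = read, 1 = merge.
	// Here merge ranges are read as indexes into the node list and read ranges
	// as indexes into Blocks — one plausible reading of the comment above.
	plan := &queryv1.QueryPlan{
		Graph: []uint32{
			1, 1, 2, // node 0: merge over nodes [1, 3)
			0, 0, 1, // node 1: read over blocks [0, 1)
			0, 1, 1, // node 2: read over blocks [1, 2)
		},
		Blocks: []*metastorev1.BlockMeta{{}, {}}, // two placeholder block entries
	}
	fmt.Printf("%d nodes over %d blocks\n", len(plan.Graph)/3, len(plan.Blocks))
}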
-func (*Query) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{3} -} - -func (x *Query) GetQueryType() QueryType { - if x != nil { - return x.QueryType - } - return QueryType_QUERY_UNSPECIFIED -} - -func (x *Query) GetLabelNames() *LabelNamesQuery { - if x != nil { - return x.LabelNames - } - return nil -} - -func (x *Query) GetLabelValues() *LabelValuesQuery { - if x != nil { - return x.LabelValues - } - return nil -} - -func (x *Query) GetSeriesLabels() *SeriesLabelsQuery { - if x != nil { - return x.SeriesLabels - } - return nil -} - -func (x *Query) GetTimeSeries() *TimeSeriesQuery { - if x != nil { - return x.TimeSeries - } - return nil -} - -func (x *Query) GetTree() *TreeQuery { - if x != nil { - return x.Tree - } - return nil -} - -type InvokeResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Reports []*Report `protobuf:"bytes,1,rep,name=reports,proto3" json:"reports,omitempty"` - Diagnostics *Diagnostics `protobuf:"bytes,2,opt,name=diagnostics,proto3" json:"diagnostics,omitempty"` -} - -func (x *InvokeResponse) Reset() { - *x = InvokeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InvokeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InvokeResponse) ProtoMessage() {} - -func (x *InvokeResponse) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InvokeResponse.ProtoReflect.Descriptor instead. -func (*InvokeResponse) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{4} -} - -func (x *InvokeResponse) GetReports() []*Report { - if x != nil { - return x.Reports - } - return nil -} - -func (x *InvokeResponse) GetDiagnostics() *Diagnostics { - if x != nil { - return x.Diagnostics - } - return nil -} - -// Diagnostic messages, events, statistics, analytics, etc. -type Diagnostics struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Diagnostics) Reset() { - *x = Diagnostics{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Diagnostics) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Diagnostics) ProtoMessage() {} - -func (x *Diagnostics) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Diagnostics.ProtoReflect.Descriptor instead. 
-func (*Diagnostics) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{5} -} - -type Report struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ReportType ReportType `protobuf:"varint,1,opt,name=report_type,json=reportType,proto3,enum=querybackend.v1.ReportType" json:"report_type,omitempty"` - // Exactly one of the following fields should be set, - // depending on the report type. - LabelNames *LabelNamesReport `protobuf:"bytes,2,opt,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` - LabelValues *LabelValuesReport `protobuf:"bytes,3,opt,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` - SeriesLabels *SeriesLabelsReport `protobuf:"bytes,4,opt,name=series_labels,json=seriesLabels,proto3" json:"series_labels,omitempty"` - TimeSeries *TimeSeriesReport `protobuf:"bytes,5,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` - Tree *TreeReport `protobuf:"bytes,6,opt,name=tree,proto3" json:"tree,omitempty"` -} - -func (x *Report) Reset() { - *x = Report{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Report) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Report) ProtoMessage() {} - -func (x *Report) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Report.ProtoReflect.Descriptor instead. 
-func (*Report) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{6} -} - -func (x *Report) GetReportType() ReportType { - if x != nil { - return x.ReportType - } - return ReportType_REPORT_UNSPECIFIED -} - -func (x *Report) GetLabelNames() *LabelNamesReport { - if x != nil { - return x.LabelNames - } - return nil -} - -func (x *Report) GetLabelValues() *LabelValuesReport { - if x != nil { - return x.LabelValues - } - return nil -} - -func (x *Report) GetSeriesLabels() *SeriesLabelsReport { - if x != nil { - return x.SeriesLabels - } - return nil -} - -func (x *Report) GetTimeSeries() *TimeSeriesReport { - if x != nil { - return x.TimeSeries - } - return nil -} - -func (x *Report) GetTree() *TreeReport { - if x != nil { - return x.Tree - } - return nil -} - -type LabelNamesQuery struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *LabelNamesQuery) Reset() { - *x = LabelNamesQuery{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LabelNamesQuery) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LabelNamesQuery) ProtoMessage() {} - -func (x *LabelNamesQuery) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LabelNamesQuery.ProtoReflect.Descriptor instead. -func (*LabelNamesQuery) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{7} -} - -type LabelNamesReport struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Query *LabelNamesQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - LabelNames []string `protobuf:"bytes,2,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` -} - -func (x *LabelNamesReport) Reset() { - *x = LabelNamesReport{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LabelNamesReport) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LabelNamesReport) ProtoMessage() {} - -func (x *LabelNamesReport) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LabelNamesReport.ProtoReflect.Descriptor instead. 
-func (*LabelNamesReport) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{8} -} - -func (x *LabelNamesReport) GetQuery() *LabelNamesQuery { - if x != nil { - return x.Query - } - return nil -} - -func (x *LabelNamesReport) GetLabelNames() []string { - if x != nil { - return x.LabelNames - } - return nil -} - -type LabelValuesQuery struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName,proto3" json:"label_name,omitempty"` -} - -func (x *LabelValuesQuery) Reset() { - *x = LabelValuesQuery{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LabelValuesQuery) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LabelValuesQuery) ProtoMessage() {} - -func (x *LabelValuesQuery) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LabelValuesQuery.ProtoReflect.Descriptor instead. -func (*LabelValuesQuery) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{9} -} - -func (x *LabelValuesQuery) GetLabelName() string { - if x != nil { - return x.LabelName - } - return "" -} - -type LabelValuesReport struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Query *LabelValuesQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - LabelValues []string `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` -} - -func (x *LabelValuesReport) Reset() { - *x = LabelValuesReport{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LabelValuesReport) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LabelValuesReport) ProtoMessage() {} - -func (x *LabelValuesReport) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LabelValuesReport.ProtoReflect.Descriptor instead. 
-func (*LabelValuesReport) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{10} -} - -func (x *LabelValuesReport) GetQuery() *LabelValuesQuery { - if x != nil { - return x.Query - } - return nil -} - -func (x *LabelValuesReport) GetLabelValues() []string { - if x != nil { - return x.LabelValues - } - return nil -} - -type SeriesLabelsQuery struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"` -} - -func (x *SeriesLabelsQuery) Reset() { - *x = SeriesLabelsQuery{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SeriesLabelsQuery) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SeriesLabelsQuery) ProtoMessage() {} - -func (x *SeriesLabelsQuery) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SeriesLabelsQuery.ProtoReflect.Descriptor instead. -func (*SeriesLabelsQuery) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{11} -} - -func (x *SeriesLabelsQuery) GetLabelNames() []string { - if x != nil { - return x.LabelNames - } - return nil -} - -type SeriesLabelsReport struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Query *SeriesLabelsQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - SeriesLabels []*v11.Labels `protobuf:"bytes,2,rep,name=series_labels,json=seriesLabels,proto3" json:"series_labels,omitempty"` -} - -func (x *SeriesLabelsReport) Reset() { - *x = SeriesLabelsReport{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SeriesLabelsReport) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SeriesLabelsReport) ProtoMessage() {} - -func (x *SeriesLabelsReport) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SeriesLabelsReport.ProtoReflect.Descriptor instead. 
-func (*SeriesLabelsReport) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{12} -} - -func (x *SeriesLabelsReport) GetQuery() *SeriesLabelsQuery { - if x != nil { - return x.Query - } - return nil -} - -func (x *SeriesLabelsReport) GetSeriesLabels() []*v11.Labels { - if x != nil { - return x.SeriesLabels - } - return nil -} - -type TimeSeriesQuery struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Step float64 `protobuf:"fixed64,1,opt,name=step,proto3" json:"step,omitempty"` - GroupBy []string `protobuf:"bytes,2,rep,name=group_by,json=groupBy,proto3" json:"group_by,omitempty"` - Aggregation *v11.TimeSeriesAggregationType `protobuf:"varint,3,opt,name=aggregation,proto3,enum=types.v1.TimeSeriesAggregationType,oneof" json:"aggregation,omitempty"` -} - -func (x *TimeSeriesQuery) Reset() { - *x = TimeSeriesQuery{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeSeriesQuery) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeSeriesQuery) ProtoMessage() {} - -func (x *TimeSeriesQuery) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeSeriesQuery.ProtoReflect.Descriptor instead. -func (*TimeSeriesQuery) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{13} -} - -func (x *TimeSeriesQuery) GetStep() float64 { - if x != nil { - return x.Step - } - return 0 -} - -func (x *TimeSeriesQuery) GetGroupBy() []string { - if x != nil { - return x.GroupBy - } - return nil -} - -func (x *TimeSeriesQuery) GetAggregation() v11.TimeSeriesAggregationType { - if x != nil && x.Aggregation != nil { - return *x.Aggregation - } - return v11.TimeSeriesAggregationType(0) -} - -type TimeSeriesReport struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Query *TimeSeriesQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - TimeSeries []*v11.Series `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"` -} - -func (x *TimeSeriesReport) Reset() { - *x = TimeSeriesReport{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimeSeriesReport) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimeSeriesReport) ProtoMessage() {} - -func (x *TimeSeriesReport) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimeSeriesReport.ProtoReflect.Descriptor instead. 
-func (*TimeSeriesReport) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{14} -} - -func (x *TimeSeriesReport) GetQuery() *TimeSeriesQuery { - if x != nil { - return x.Query - } - return nil -} - -func (x *TimeSeriesReport) GetTimeSeries() []*v11.Series { - if x != nil { - return x.TimeSeries - } - return nil -} - -type TreeQuery struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - MaxNodes int64 `protobuf:"varint,1,opt,name=max_nodes,json=maxNodes,proto3" json:"max_nodes,omitempty"` -} - -func (x *TreeQuery) Reset() { - *x = TreeQuery{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TreeQuery) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TreeQuery) ProtoMessage() {} - -func (x *TreeQuery) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TreeQuery.ProtoReflect.Descriptor instead. -func (*TreeQuery) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{15} -} - -func (x *TreeQuery) GetMaxNodes() int64 { - if x != nil { - return x.MaxNodes - } - return 0 -} - -type TreeReport struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Query *TreeQuery `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - Tree []byte `protobuf:"bytes,2,opt,name=tree,proto3" json:"tree,omitempty"` -} - -func (x *TreeReport) Reset() { - *x = TreeReport{} - if protoimpl.UnsafeEnabled { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TreeReport) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TreeReport) ProtoMessage() {} - -func (x *TreeReport) ProtoReflect() protoreflect.Message { - mi := &file_querybackend_v1_querybackend_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TreeReport.ProtoReflect.Descriptor instead. 
-func (*TreeReport) Descriptor() ([]byte, []int) { - return file_querybackend_v1_querybackend_proto_rawDescGZIP(), []int{16} -} - -func (x *TreeReport) GetQuery() *TreeQuery { - if x != nil { - return x.Query - } - return nil -} - -func (x *TreeReport) GetTree() []byte { - if x != nil { - return x.Tree - } - return nil -} - -var File_querybackend_v1_querybackend_proto protoreflect.FileDescriptor - -var file_querybackend_v1_querybackend_proto_rawDesc = []byte{ - 0x0a, 0x22, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x76, - 0x31, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, - 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x76, 0x31, - 0x2f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, - 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, - 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x0f, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0xab, 0x02, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, - 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2c, - 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x39, 0x0a, 0x0a, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, - 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x09, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x52, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x14, - 0x0a, 0x05, 0x67, 0x72, 0x61, 0x70, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x05, 0x67, - 0x72, 0x61, 0x70, 0x68, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 
0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x06, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x22, 0x87, 0x03, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, - 0x39, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, - 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, - 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x44, 0x0a, - 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, - 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, - 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x0c, - 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x41, 0x0a, 0x0b, - 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, - 0x2e, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x72, 0x65, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, - 0x83, 0x01, 0x0a, 0x0e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, - 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x07, 0x72, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x3e, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x0d, 0x0a, 0x0b, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x03, 0x0a, 0x06, 0x52, 0x65, 0x70, 
0x6f, 0x72, 0x74, 0x12, - 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, - 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x42, 0x0a, - 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, - 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x12, 0x45, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, - 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0b, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, - 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x23, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x12, 0x42, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, - 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, - 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, - 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x52, 0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0x11, 0x0a, 0x0f, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x22, 0x6b, 0x0a, 0x10, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x36, - 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x31, 0x0a, 0x10, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x6f, 0x0a, 0x11, 0x4c, 0x61, - 
0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, - 0x37, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, - 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x11, 0x53, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x22, 0x85, 0x01, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x38, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, - 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x12, 0x35, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x0c, 0x73, 0x65, 0x72, - 0x69, 0x65, 0x73, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x0f, 0x54, 0x69, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x12, 0x0a, - 0x04, 0x73, 0x74, 0x65, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x73, 0x74, 0x65, - 0x70, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x62, 0x79, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x12, 0x4a, 0x0a, 0x0b, - 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x23, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x61, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7d, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, - 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x36, 0x0a, 0x05, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x12, 0x31, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, - 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x74, 0x69, 0x6d, - 0x65, 0x53, 0x65, 0x72, 
0x69, 0x65, 0x73, 0x22, 0x28, 0x0a, 0x09, 0x54, 0x72, 0x65, 0x65, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x6f, 0x64, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x4e, 0x6f, 0x64, 0x65, - 0x73, 0x22, 0x52, 0x0a, 0x0a, 0x54, 0x72, 0x65, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, - 0x30, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x72, 0x65, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x74, 0x72, 0x65, 0x65, 0x2a, 0x91, 0x01, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x51, 0x55, - 0x45, 0x52, 0x59, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x10, - 0x01, 0x12, 0x16, 0x0a, 0x12, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, - 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x51, 0x55, 0x45, - 0x52, 0x59, 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, - 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x49, 0x4d, 0x45, - 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, 0x53, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x51, 0x55, 0x45, - 0x52, 0x59, 0x5f, 0x54, 0x52, 0x45, 0x45, 0x10, 0x05, 0x2a, 0x98, 0x01, 0x0a, 0x0a, 0x52, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x50, 0x4f, - 0x52, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, - 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x50, 0x4f, - 0x52, 0x54, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x10, - 0x02, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x49, - 0x45, 0x53, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x52, - 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x49, 0x45, - 0x53, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x54, 0x52, - 0x45, 0x45, 0x10, 0x05, 0x32, 0x62, 0x0a, 0x13, 0x51, 0x75, 0x65, 0x72, 0x79, 0x42, 0x61, 0x63, - 0x6b, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x06, 0x49, - 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x1e, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, - 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, - 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xd3, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x76, 0x31, - 0x42, 0x11, 0x51, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, - 0x6f, 0x70, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, - 0x64, 0x2f, 0x76, 0x31, 0x3b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, - 0x64, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x51, 0x58, 0x58, 0xaa, 0x02, 0x0f, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0f, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1b, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5c, 0x56, 0x31, 0x5c, - 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x10, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_querybackend_v1_querybackend_proto_rawDescOnce sync.Once - file_querybackend_v1_querybackend_proto_rawDescData = file_querybackend_v1_querybackend_proto_rawDesc -) - -func file_querybackend_v1_querybackend_proto_rawDescGZIP() []byte { - file_querybackend_v1_querybackend_proto_rawDescOnce.Do(func() { - file_querybackend_v1_querybackend_proto_rawDescData = protoimpl.X.CompressGZIP(file_querybackend_v1_querybackend_proto_rawDescData) - }) - return file_querybackend_v1_querybackend_proto_rawDescData -} - -var file_querybackend_v1_querybackend_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_querybackend_v1_querybackend_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_querybackend_v1_querybackend_proto_goTypes = []any{ - (QueryType)(0), // 0: querybackend.v1.QueryType - (ReportType)(0), // 1: querybackend.v1.ReportType - (*InvokeOptions)(nil), // 2: querybackend.v1.InvokeOptions - (*InvokeRequest)(nil), // 3: querybackend.v1.InvokeRequest - (*QueryPlan)(nil), // 4: querybackend.v1.QueryPlan - (*Query)(nil), // 5: querybackend.v1.Query - (*InvokeResponse)(nil), // 6: querybackend.v1.InvokeResponse - (*Diagnostics)(nil), // 7: querybackend.v1.Diagnostics - (*Report)(nil), // 8: querybackend.v1.Report - (*LabelNamesQuery)(nil), // 9: querybackend.v1.LabelNamesQuery - (*LabelNamesReport)(nil), // 10: querybackend.v1.LabelNamesReport - (*LabelValuesQuery)(nil), // 11: querybackend.v1.LabelValuesQuery - (*LabelValuesReport)(nil), // 12: querybackend.v1.LabelValuesReport - (*SeriesLabelsQuery)(nil), // 13: querybackend.v1.SeriesLabelsQuery - (*SeriesLabelsReport)(nil), // 14: querybackend.v1.SeriesLabelsReport - (*TimeSeriesQuery)(nil), // 15: querybackend.v1.TimeSeriesQuery - (*TimeSeriesReport)(nil), // 16: querybackend.v1.TimeSeriesReport - (*TreeQuery)(nil), // 17: querybackend.v1.TreeQuery - (*TreeReport)(nil), // 18: querybackend.v1.TreeReport - (*v1.BlockMeta)(nil), // 19: metastore.v1.BlockMeta - (*v11.Labels)(nil), // 20: types.v1.Labels - (v11.TimeSeriesAggregationType)(0), // 21: types.v1.TimeSeriesAggregationType - (*v11.Series)(nil), // 22: types.v1.Series -} -var file_querybackend_v1_querybackend_proto_depIdxs = []int32{ - 5, // 0: querybackend.v1.InvokeRequest.query:type_name -> querybackend.v1.Query - 4, // 1: querybackend.v1.InvokeRequest.query_plan:type_name -> querybackend.v1.QueryPlan - 2, // 2: querybackend.v1.InvokeRequest.options:type_name -> querybackend.v1.InvokeOptions - 19, // 3: 
querybackend.v1.QueryPlan.blocks:type_name -> metastore.v1.BlockMeta - 0, // 4: querybackend.v1.Query.query_type:type_name -> querybackend.v1.QueryType - 9, // 5: querybackend.v1.Query.label_names:type_name -> querybackend.v1.LabelNamesQuery - 11, // 6: querybackend.v1.Query.label_values:type_name -> querybackend.v1.LabelValuesQuery - 13, // 7: querybackend.v1.Query.series_labels:type_name -> querybackend.v1.SeriesLabelsQuery - 15, // 8: querybackend.v1.Query.time_series:type_name -> querybackend.v1.TimeSeriesQuery - 17, // 9: querybackend.v1.Query.tree:type_name -> querybackend.v1.TreeQuery - 8, // 10: querybackend.v1.InvokeResponse.reports:type_name -> querybackend.v1.Report - 7, // 11: querybackend.v1.InvokeResponse.diagnostics:type_name -> querybackend.v1.Diagnostics - 1, // 12: querybackend.v1.Report.report_type:type_name -> querybackend.v1.ReportType - 10, // 13: querybackend.v1.Report.label_names:type_name -> querybackend.v1.LabelNamesReport - 12, // 14: querybackend.v1.Report.label_values:type_name -> querybackend.v1.LabelValuesReport - 14, // 15: querybackend.v1.Report.series_labels:type_name -> querybackend.v1.SeriesLabelsReport - 16, // 16: querybackend.v1.Report.time_series:type_name -> querybackend.v1.TimeSeriesReport - 18, // 17: querybackend.v1.Report.tree:type_name -> querybackend.v1.TreeReport - 9, // 18: querybackend.v1.LabelNamesReport.query:type_name -> querybackend.v1.LabelNamesQuery - 11, // 19: querybackend.v1.LabelValuesReport.query:type_name -> querybackend.v1.LabelValuesQuery - 13, // 20: querybackend.v1.SeriesLabelsReport.query:type_name -> querybackend.v1.SeriesLabelsQuery - 20, // 21: querybackend.v1.SeriesLabelsReport.series_labels:type_name -> types.v1.Labels - 21, // 22: querybackend.v1.TimeSeriesQuery.aggregation:type_name -> types.v1.TimeSeriesAggregationType - 15, // 23: querybackend.v1.TimeSeriesReport.query:type_name -> querybackend.v1.TimeSeriesQuery - 22, // 24: querybackend.v1.TimeSeriesReport.time_series:type_name -> types.v1.Series - 17, // 25: querybackend.v1.TreeReport.query:type_name -> querybackend.v1.TreeQuery - 3, // 26: querybackend.v1.QueryBackendService.Invoke:input_type -> querybackend.v1.InvokeRequest - 6, // 27: querybackend.v1.QueryBackendService.Invoke:output_type -> querybackend.v1.InvokeResponse - 27, // [27:28] is the sub-list for method output_type - 26, // [26:27] is the sub-list for method input_type - 26, // [26:26] is the sub-list for extension type_name - 26, // [26:26] is the sub-list for extension extendee - 0, // [0:26] is the sub-list for field type_name -} - -func init() { file_querybackend_v1_querybackend_proto_init() } -func file_querybackend_v1_querybackend_proto_init() { - if File_querybackend_v1_querybackend_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_querybackend_v1_querybackend_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*InvokeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*InvokeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*QueryPlan); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } 
- file_querybackend_v1_querybackend_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Query); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*InvokeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Diagnostics); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*Report); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*LabelNamesQuery); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*LabelNamesReport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*LabelValuesQuery); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*LabelValuesReport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*SeriesLabelsQuery); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*SeriesLabelsReport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*TimeSeriesQuery); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*TimeSeriesReport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*TreeQuery); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_querybackend_v1_querybackend_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*TreeReport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - } - file_querybackend_v1_querybackend_proto_msgTypes[13].OneofWrappers = []any{} - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_querybackend_v1_querybackend_proto_rawDesc, - NumEnums: 2, - NumMessages: 17, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_querybackend_v1_querybackend_proto_goTypes, - DependencyIndexes: file_querybackend_v1_querybackend_proto_depIdxs, - EnumInfos: file_querybackend_v1_querybackend_proto_enumTypes, - MessageInfos: file_querybackend_v1_querybackend_proto_msgTypes, - }.Build() - File_querybackend_v1_querybackend_proto = out.File - file_querybackend_v1_querybackend_proto_rawDesc = nil - file_querybackend_v1_querybackend_proto_goTypes = nil - file_querybackend_v1_querybackend_proto_depIdxs = nil -} diff --git a/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.go b/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.go deleted file mode 100644 index 637f1d3f58..0000000000 --- a/api/gen/proto/go/querybackend/v1/querybackendv1connect/querybackend.connect.go +++ /dev/null @@ -1,114 +0,0 @@ -// Code generated by protoc-gen-connect-go. DO NOT EDIT. -// -// Source: querybackend/v1/querybackend.proto - -package querybackendv1connect - -import ( - connect "connectrpc.com/connect" - context "context" - errors "errors" - v1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" - http "net/http" - strings "strings" -) - -// This is a compile-time assertion to ensure that this generated file and the connect package are -// compatible. If you get a compiler error that this constant is not defined, this code was -// generated with a version of connect newer than the one compiled into your binary. You can fix the -// problem by either regenerating this code with an older version of connect or updating the connect -// version compiled into your binary. -const _ = connect.IsAtLeastVersion1_13_0 - -const ( - // QueryBackendServiceName is the fully-qualified name of the QueryBackendService service. - QueryBackendServiceName = "querybackend.v1.QueryBackendService" -) - -// These constants are the fully-qualified names of the RPCs defined in this package. They're -// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. -// -// Note that these are different from the fully-qualified method names used by -// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to -// reflection-formatted method names, remove the leading slash and convert the remaining slash to a -// period. -const ( - // QueryBackendServiceInvokeProcedure is the fully-qualified name of the QueryBackendService's - // Invoke RPC. - QueryBackendServiceInvokeProcedure = "/querybackend.v1.QueryBackendService/Invoke" -) - -// These variables are the protoreflect.Descriptor objects for the RPCs defined in this package. -var ( - queryBackendServiceServiceDescriptor = v1.File_querybackend_v1_querybackend_proto.Services().ByName("QueryBackendService") - queryBackendServiceInvokeMethodDescriptor = queryBackendServiceServiceDescriptor.Methods().ByName("Invoke") -) - -// QueryBackendServiceClient is a client for the querybackend.v1.QueryBackendService service. 
-type QueryBackendServiceClient interface { - Invoke(context.Context, *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) -} - -// NewQueryBackendServiceClient constructs a client for the querybackend.v1.QueryBackendService -// service. By default, it uses the Connect protocol with the binary Protobuf Codec, asks for -// gzipped responses, and sends uncompressed requests. To use the gRPC or gRPC-Web protocols, supply -// the connect.WithGRPC() or connect.WithGRPCWeb() options. -// -// The URL supplied here should be the base URL for the Connect or gRPC server (for example, -// http://api.acme.com or https://acme.com/grpc). -func NewQueryBackendServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) QueryBackendServiceClient { - baseURL = strings.TrimRight(baseURL, "/") - return &queryBackendServiceClient{ - invoke: connect.NewClient[v1.InvokeRequest, v1.InvokeResponse]( - httpClient, - baseURL+QueryBackendServiceInvokeProcedure, - connect.WithSchema(queryBackendServiceInvokeMethodDescriptor), - connect.WithClientOptions(opts...), - ), - } -} - -// queryBackendServiceClient implements QueryBackendServiceClient. -type queryBackendServiceClient struct { - invoke *connect.Client[v1.InvokeRequest, v1.InvokeResponse] -} - -// Invoke calls querybackend.v1.QueryBackendService.Invoke. -func (c *queryBackendServiceClient) Invoke(ctx context.Context, req *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) { - return c.invoke.CallUnary(ctx, req) -} - -// QueryBackendServiceHandler is an implementation of the querybackend.v1.QueryBackendService -// service. -type QueryBackendServiceHandler interface { - Invoke(context.Context, *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) -} - -// NewQueryBackendServiceHandler builds an HTTP handler from the service implementation. It returns -// the path on which to mount the handler and the handler itself. -// -// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf -// and JSON codecs. They also support gzip compression. -func NewQueryBackendServiceHandler(svc QueryBackendServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { - queryBackendServiceInvokeHandler := connect.NewUnaryHandler( - QueryBackendServiceInvokeProcedure, - svc.Invoke, - connect.WithSchema(queryBackendServiceInvokeMethodDescriptor), - connect.WithHandlerOptions(opts...), - ) - return "/querybackend.v1.QueryBackendService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case QueryBackendServiceInvokeProcedure: - queryBackendServiceInvokeHandler.ServeHTTP(w, r) - default: - http.NotFound(w, r) - } - }) -} - -// UnimplementedQueryBackendServiceHandler returns CodeUnimplemented from all methods. 
-type UnimplementedQueryBackendServiceHandler struct{} - -func (UnimplementedQueryBackendServiceHandler) Invoke(context.Context, *connect.Request[v1.InvokeRequest]) (*connect.Response[v1.InvokeResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("querybackend.v1.QueryBackendService.Invoke is not implemented")) -} diff --git a/api/openapiv2/gen/phlare.swagger.json b/api/openapiv2/gen/phlare.swagger.json index 9ddd87bf69..fb8846ef46 100644 --- a/api/openapiv2/gen/phlare.swagger.json +++ b/api/openapiv2/gen/phlare.swagger.json @@ -23,6 +23,9 @@ { "name": "QuerierService" }, + { + "name": "QueryFrontendService" + }, { "name": "QueryBackendService" }, @@ -1416,6 +1419,28 @@ } } }, + "v1PprofQuery": { + "type": "object", + "properties": { + "maxNodes": { + "type": "string", + "format": "int64", + "description": "TODO(kolesnikovae): Go PGO options." + } + } + }, + "v1PprofReport": { + "type": "object", + "properties": { + "query": { + "$ref": "#/definitions/v1PprofQuery" + }, + "pprof": { + "type": "string", + "format": "byte" + } + } + }, "v1ProfileFormat": { "type": "string", "enum": [ @@ -1495,8 +1520,11 @@ "$ref": "#/definitions/v1TimeSeriesQuery" }, "tree": { - "$ref": "#/definitions/v1TreeQuery", - "description": "pprof\n function_details\n call_graph\n top_table\n ..." + "$ref": "#/definitions/v1TreeQuery" + }, + "pprof": { + "$ref": "#/definitions/v1PprofQuery", + "description": "function_details\n call_graph\n top_table\n ..." } } }, @@ -1550,6 +1578,18 @@ }, "description": "Query plan is represented by a DAG, where each node\nmight be either \"merge\" or \"read\" (leaves). Each node\nreferences a range: merge nodes refer to other nodes,\nwhile read nodes refer to the blocks." }, + "v1QueryResponse": { + "type": "object", + "properties": { + "reports": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1Report" + } + } + } + }, "v1QueryScope": { "type": "object", "properties": { @@ -1600,7 +1640,8 @@ "QUERY_LABEL_VALUES", "QUERY_SERIES_LABELS", "QUERY_TIME_SERIES", - "QUERY_TREE" + "QUERY_TREE", + "QUERY_PPROF" ], "default": "QUERY_UNSPECIFIED" }, @@ -1671,6 +1712,9 @@ }, "tree": { "$ref": "#/definitions/v1TreeReport" + }, + "pprof": { + "$ref": "#/definitions/v1PprofReport" } } }, @@ -1682,7 +1726,8 @@ "REPORT_LABEL_VALUES", "REPORT_SERIES_LABELS", "REPORT_TIME_SERIES", - "REPORT_TREE" + "REPORT_TREE", + "REPORT_PPROF" ], "default": "REPORT_UNSPECIFIED" }, @@ -1991,9 +2036,6 @@ "items": { "type": "string" } - }, - "aggregation": { - "$ref": "#/definitions/v1TimeSeriesAggregationType" } } }, diff --git a/api/querybackend/v1/querybackend.proto b/api/query/v1/query.proto similarity index 72% rename from api/querybackend/v1/querybackend.proto rename to api/query/v1/query.proto index c368f7a633..019491787c 100644 --- a/api/querybackend/v1/querybackend.proto +++ b/api/query/v1/query.proto @@ -1,11 +1,37 @@ syntax = "proto3"; -package querybackend.v1; +package query.v1; import "google/v1/profile.proto"; import "metastore/v1/metastore.proto"; import "types/v1/types.proto"; +// QueryFrontendService is supposed to handle both access to the time series +// profiling data through the QueryBackendService and metadata queries through +// the MetastoreService. Example metadata queries: listing datasets (services) +// and their available profile types, accessing tenant statistics, analyzing +// the queries, etc. However, metadata queries should not be mixed with the +// time series profiling queries, therefore the API is to be extended. 
+// +// QueryFrontendService is supposed to be a public interface, available to +// the external clients, while QueryBackendService and MetastoreService are +// supposed to be private, accessed through the QueryFrontendService. + +service QueryFrontendService { + rpc Query(QueryRequest) returns (QueryResponse) {} +} + +message QueryRequest { + int64 start_time = 1; + int64 end_time = 2; + string label_selector = 3; + repeated Query query = 4; +} + +message QueryResponse { + repeated Report reports = 1; +} + service QueryBackendService { rpc Invoke(InvokeRequest) returns (InvokeResponse) {} } @@ -55,7 +81,7 @@ message Query { SeriesLabelsQuery series_labels = 4; TimeSeriesQuery time_series = 5; TreeQuery tree = 6; - // pprof + PprofQuery pprof = 7; // function_details // call_graph // top_table @@ -69,6 +95,7 @@ enum QueryType { QUERY_SERIES_LABELS = 3; QUERY_TIME_SERIES = 4; QUERY_TREE = 5; + QUERY_PPROF = 6; } message InvokeResponse { @@ -88,6 +115,7 @@ message Report { SeriesLabelsReport series_labels = 4; TimeSeriesReport time_series = 5; TreeReport tree = 6; + PprofReport pprof = 7; } enum ReportType { @@ -97,6 +125,7 @@ enum ReportType { REPORT_SERIES_LABELS = 3; REPORT_TIME_SERIES = 4; REPORT_TREE = 5; + REPORT_PPROF = 6; } message LabelNamesQuery {} @@ -127,7 +156,6 @@ message SeriesLabelsReport { message TimeSeriesQuery { double step = 1; repeated string group_by = 2; - optional types.v1.TimeSeriesAggregationType aggregation = 3; } message TimeSeriesReport { @@ -143,3 +171,13 @@ message TreeReport { TreeQuery query = 1; bytes tree = 2; } + +message PprofQuery { + int64 max_nodes = 1; + // TODO(kolesnikovae): Go PGO options. +} + +message PprofReport { + PprofQuery query = 1; + bytes pprof = 2; +} diff --git a/cmd/pyroscope/help-all.txt.tmpl b/cmd/pyroscope/help-all.txt.tmpl index b691b68c41..33d4f6f967 100644 --- a/cmd/pyroscope/help-all.txt.tmpl +++ b/cmd/pyroscope/help-all.txt.tmpl @@ -766,9 +766,9 @@ Usage of ./pyroscope: -server.grpc-max-concurrent-streams uint Limit on the number of concurrent streams for gRPC calls per client connection (0 = unlimited) (default 100) -server.grpc-max-recv-msg-size-bytes int - Limit on the size of a gRPC message this server can receive (bytes). (default 104857600) + Limit on the size of a gRPC message this server can receive (bytes). (default 4194304) -server.grpc-max-send-msg-size-bytes int - Limit on the size of a gRPC message this server can send (bytes). (default 104857600) + Limit on the size of a gRPC message this server can send (bytes). (default 4194304) -server.grpc-tls-ca-path string GRPC TLS Client CA path. -server.grpc-tls-cert-path string @@ -784,7 +784,7 @@ Usage of ./pyroscope: -server.grpc.keepalive.max-connection-idle duration The duration after which an idle connection should be closed. Default: infinity (default 2562047h47m16.854775807s) -server.grpc.keepalive.min-time-between-pings duration - Minimum amount of time a client should wait before sending a keepalive ping. If client sends keepalive ping more often, server will send GOAWAY and close the connection. (default 1s) + Minimum amount of time a client should wait before sending a keepalive ping. If client sends keepalive ping more often, server will send GOAWAY and close the connection. (default 5m0s) -server.grpc.keepalive.ping-without-stream-allowed If true, server allows keepalive pings even when there are no active streams(RPCs). If false, and client sends ping when there are no active streams, server will send GOAWAY and close the connection. 
-server.grpc.keepalive.time duration diff --git a/cmd/pyroscope/help.txt.tmpl b/cmd/pyroscope/help.txt.tmpl index 586682d894..c133f3e8d3 100644 --- a/cmd/pyroscope/help.txt.tmpl +++ b/cmd/pyroscope/help.txt.tmpl @@ -208,9 +208,9 @@ Usage of ./pyroscope: -server.grpc-max-concurrent-streams uint Limit on the number of concurrent streams for gRPC calls per client connection (0 = unlimited) (default 100) -server.grpc-max-recv-msg-size-bytes int - Limit on the size of a gRPC message this server can receive (bytes). (default 104857600) + Limit on the size of a gRPC message this server can receive (bytes). (default 4194304) -server.grpc-max-send-msg-size-bytes int - Limit on the size of a gRPC message this server can send (bytes). (default 104857600) + Limit on the size of a gRPC message this server can send (bytes). (default 4194304) -server.grpc-tls-ca-path string GRPC TLS Client CA path. -server.grpc-tls-cert-path string @@ -226,7 +226,7 @@ Usage of ./pyroscope: -server.grpc.keepalive.max-connection-idle duration The duration after which an idle connection should be closed. Default: infinity (default 2562047h47m16.854775807s) -server.grpc.keepalive.min-time-between-pings duration - Minimum amount of time a client should wait before sending a keepalive ping. If client sends keepalive ping more often, server will send GOAWAY and close the connection. (default 1s) + Minimum amount of time a client should wait before sending a keepalive ping. If client sends keepalive ping more often, server will send GOAWAY and close the connection. (default 5m0s) -server.grpc.keepalive.ping-without-stream-allowed If true, server allows keepalive pings even when there are no active streams(RPCs). If false, and client sends ping when there are no active streams, server will send GOAWAY and close the connection. -server.grpc.keepalive.time duration diff --git a/docs/sources/configure-server/reference-configuration-parameters/index.md b/docs/sources/configure-server/reference-configuration-parameters/index.md index 948fb22eb7..f78698d032 100644 --- a/docs/sources/configure-server/reference-configuration-parameters/index.md +++ b/docs/sources/configure-server/reference-configuration-parameters/index.md @@ -421,11 +421,11 @@ grpc_tls_config: # Limit on the size of a gRPC message this server can receive (bytes). # CLI flag: -server.grpc-max-recv-msg-size-bytes -[grpc_server_max_recv_msg_size: | default = 104857600] +[grpc_server_max_recv_msg_size: | default = 4194304] # Limit on the size of a gRPC message this server can send (bytes). # CLI flag: -server.grpc-max-send-msg-size-bytes -[grpc_server_max_send_msg_size: | default = 104857600] +[grpc_server_max_send_msg_size: | default = 4194304] # Limit on the number of concurrent streams for gRPC calls per client connection # (0 = unlimited) @@ -461,7 +461,7 @@ grpc_tls_config: # If client sends keepalive ping more often, server will send GOAWAY and close # the connection. # CLI flag: -server.grpc.keepalive.min-time-between-pings -[grpc_server_min_time_between_pings: | default = 1s] +[grpc_server_min_time_between_pings: | default = 5m] # If true, server allows keepalive pings even when there are no active # streams(RPCs). 
If false, and client sends ping when there are no active diff --git a/pkg/api/api.go b/pkg/api/api.go index 36d958eb75..bc464e531c 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -242,14 +242,11 @@ func (a *API) RegisterIngesterRing(r http.Handler) { }) } -type QuerierSvc interface { - querierv1connect.QuerierServiceHandler - vcsv1connect.VCSServiceHandler +func (a *API) RegisterQuerierServiceHandler(svc querierv1connect.QuerierServiceHandler) { + querierv1connect.RegisterQuerierServiceHandler(a.server.HTTP, svc, a.connectOptionsAuthLogRecovery()...) } -// RegisterQuerier registers the endpoints associated with the querier. -func (a *API) RegisterQuerier(svc QuerierSvc) { - querierv1connect.RegisterQuerierServiceHandler(a.server.HTTP, svc, a.connectOptionsAuthLogRecovery()...) +func (a *API) RegisterVCSServiceHandler(svc vcsv1connect.VCSServiceHandler) { vcsv1connect.RegisterVCSServiceHandler(a.server.HTTP, svc, a.connectOptionsAuthLogRecovery()...) } @@ -285,8 +282,8 @@ func (a *API) RegisterCompactor(c *compactor.MultitenantCompactor) { a.RegisterRoute("/compactor/ring", http.HandlerFunc(c.RingHandler), false, true, "GET", "POST") } -// RegisterQueryFrontend registers the endpoints associated with the query frontend. -func (a *API) RegisterQueryFrontend(frontendSvc *frontend.Frontend) { +// RegisterFrontendForQuerierHandler registers the endpoints associated with the query frontend. +func (a *API) RegisterFrontendForQuerierHandler(frontendSvc *frontend.Frontend) { frontendpbconnect.RegisterFrontendForQuerierHandler(a.server.HTTP, frontendSvc, a.connectOptionsAuthRecovery()...) } diff --git a/pkg/api/api_experimental.go b/pkg/api/api_experimental.go index d74f1e4b6f..aa03eb6edb 100644 --- a/pkg/api/api_experimental.go +++ b/pkg/api/api_experimental.go @@ -5,11 +5,11 @@ import ( compactorv1 "github.com/grafana/pyroscope/api/gen/proto/go/compactor/v1" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" segmentwriterv1 "github.com/grafana/pyroscope/api/gen/proto/go/segmentwriter/v1" segmentwriter "github.com/grafana/pyroscope/pkg/experiment/ingester" "github.com/grafana/pyroscope/pkg/experiment/metastore" - "github.com/grafana/pyroscope/pkg/experiment/querybackend" + querybackend "github.com/grafana/pyroscope/pkg/experiment/query_backend" ) // TODO(kolesnikovae): Recovery interceptor. 
@@ -32,5 +32,5 @@ func (a *API) RegisterMetastore(svc *metastore.Metastore) { } func (a *API) RegisterQueryBackend(svc *querybackend.QueryBackend) { - querybackendv1.RegisterQueryBackendServiceServer(a.server.GRPC, svc) + queryv1.RegisterQueryBackendServiceServer(a.server.GRPC, svc) } diff --git a/pkg/distributor/write_path/router_test.go b/pkg/distributor/write_path/write_path_test.go similarity index 100% rename from pkg/distributor/write_path/router_test.go rename to pkg/distributor/write_path/write_path_test.go diff --git a/pkg/experiment/compactor/compaction_worker.go b/pkg/experiment/compactor/compaction_worker.go index f5aa34d11c..4fac0eb004 100644 --- a/pkg/experiment/compactor/compaction_worker.go +++ b/pkg/experiment/compactor/compaction_worker.go @@ -20,7 +20,7 @@ import ( compactorv1 "github.com/grafana/pyroscope/api/gen/proto/go/compactor/v1" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" "github.com/grafana/pyroscope/pkg/experiment/metastore/client" - "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" "github.com/grafana/pyroscope/pkg/objstore" ) diff --git a/pkg/experiment/ingester/client/client.go b/pkg/experiment/ingester/client/client.go index 66db736e57..7b77a95e48 100644 --- a/pkg/experiment/ingester/client/client.go +++ b/pkg/experiment/ingester/client/client.go @@ -253,7 +253,9 @@ func (c *Client) pushToInstance( if err != nil { return nil, err } - c.metrics.sentBytes.WithLabelValues(strconv.Itoa(int(req.Shard)), req.TenantId, addr) + c.metrics.sentBytes. + WithLabelValues(strconv.Itoa(int(req.Shard)), req.TenantId, addr). + Observe(float64(len(req.Profile))) return segmentwriterv1.NewSegmentWriterServiceClient(conn).Push(ctx, req) } diff --git a/pkg/experiment/querybackend/backend.go b/pkg/experiment/query_backend/backend.go similarity index 80% rename from pkg/experiment/querybackend/backend.go rename to pkg/experiment/query_backend/backend.go index 40ec1dd132..1a0df9c487 100644 --- a/pkg/experiment/querybackend/backend.go +++ b/pkg/experiment/query_backend/backend.go @@ -1,4 +1,4 @@ -package querybackend +package query_backend import ( "context" @@ -16,8 +16,8 @@ import ( "google.golang.org/grpc/status" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" - "github.com/grafana/pyroscope/pkg/experiment/querybackend/queryplan" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + queryplan "github.com/grafana/pyroscope/pkg/experiment/query_backend/query_plan" "github.com/grafana/pyroscope/pkg/iter" "github.com/grafana/pyroscope/pkg/util" ) @@ -42,12 +42,12 @@ func (cfg *Config) Validate() error { } type QueryHandler interface { - Invoke(context.Context, *querybackendv1.InvokeRequest) (*querybackendv1.InvokeResponse, error) + Invoke(context.Context, *queryv1.InvokeRequest) (*queryv1.InvokeResponse, error) } type QueryBackend struct { service services.Service - querybackendv1.QueryBackendServiceServer + queryv1.QueryBackendServiceServer config Config logger log.Logger @@ -86,8 +86,8 @@ func (q *QueryBackend) stopping(error) error { return nil } func (q *QueryBackend) Invoke( ctx context.Context, - req *querybackendv1.InvokeRequest, -) (*querybackendv1.InvokeResponse, error) { + req *queryv1.InvokeRequest, +) (*queryv1.InvokeResponse, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "QueryBackend.Invoke") defer span.Finish() @@ -96,7 +96,7 
@@ func (q *QueryBackend) Invoke( case queryplan.NodeMerge: return q.merge(ctx, req, r.Children()) case queryplan.NodeRead: - return q.withThrottling(func() (*querybackendv1.InvokeResponse, error) { + return q.withThrottling(func() (*queryv1.InvokeResponse, error) { return q.read(ctx, req, r.Blocks()) }) default: @@ -106,9 +106,9 @@ func (q *QueryBackend) Invoke( func (q *QueryBackend) merge( ctx context.Context, - request *querybackendv1.InvokeRequest, + request *queryv1.InvokeRequest, children iter.Iterator[*queryplan.Node], -) (*querybackendv1.InvokeResponse, error) { +) (*queryv1.InvokeResponse, error) { request.QueryPlan = nil m := newAggregator(request) g, ctx := errgroup.WithContext(ctx) @@ -128,16 +128,16 @@ func (q *QueryBackend) merge( func (q *QueryBackend) read( ctx context.Context, - request *querybackendv1.InvokeRequest, + request *queryv1.InvokeRequest, blocks iter.Iterator[*metastorev1.BlockMeta], -) (*querybackendv1.InvokeResponse, error) { - request.QueryPlan = &querybackendv1.QueryPlan{ +) (*queryv1.InvokeResponse, error) { + request.QueryPlan = &queryv1.QueryPlan{ Blocks: iter.MustSlice(blocks), } return q.blockReader.Invoke(ctx, request) } -func (q *QueryBackend) withThrottling(fn func() (*querybackendv1.InvokeResponse, error)) (*querybackendv1.InvokeResponse, error) { +func (q *QueryBackend) withThrottling(fn func() (*queryv1.InvokeResponse, error)) (*queryv1.InvokeResponse, error) { if q.running.Inc() > q.concurrency { return nil, status.Error(codes.ResourceExhausted, "all minions are busy, please try later") } diff --git a/pkg/experiment/querybackend/block/compaction.go b/pkg/experiment/query_backend/block/compaction.go similarity index 100% rename from pkg/experiment/querybackend/block/compaction.go rename to pkg/experiment/query_backend/block/compaction.go diff --git a/pkg/experiment/querybackend/block/compaction_test.go b/pkg/experiment/query_backend/block/compaction_test.go similarity index 100% rename from pkg/experiment/querybackend/block/compaction_test.go rename to pkg/experiment/query_backend/block/compaction_test.go diff --git a/pkg/experiment/querybackend/block/constants.go b/pkg/experiment/query_backend/block/constants.go similarity index 100% rename from pkg/experiment/querybackend/block/constants.go rename to pkg/experiment/query_backend/block/constants.go diff --git a/pkg/experiment/querybackend/block/dataset.go b/pkg/experiment/query_backend/block/dataset.go similarity index 100% rename from pkg/experiment/querybackend/block/dataset.go rename to pkg/experiment/query_backend/block/dataset.go diff --git a/pkg/experiment/querybackend/block/object.go b/pkg/experiment/query_backend/block/object.go similarity index 100% rename from pkg/experiment/querybackend/block/object.go rename to pkg/experiment/query_backend/block/object.go diff --git a/pkg/experiment/querybackend/block/section_profiles.go b/pkg/experiment/query_backend/block/section_profiles.go similarity index 100% rename from pkg/experiment/querybackend/block/section_profiles.go rename to pkg/experiment/query_backend/block/section_profiles.go diff --git a/pkg/experiment/querybackend/block/section_symbols.go b/pkg/experiment/query_backend/block/section_symbols.go similarity index 100% rename from pkg/experiment/querybackend/block/section_symbols.go rename to pkg/experiment/query_backend/block/section_symbols.go diff --git a/pkg/experiment/querybackend/block/section_tsdb.go b/pkg/experiment/query_backend/block/section_tsdb.go similarity index 100% rename from 
pkg/experiment/querybackend/block/section_tsdb.go rename to pkg/experiment/query_backend/block/section_tsdb.go diff --git a/pkg/experiment/querybackend/block/testdata/.gitignore b/pkg/experiment/query_backend/block/testdata/.gitignore similarity index 100% rename from pkg/experiment/querybackend/block/testdata/.gitignore rename to pkg/experiment/query_backend/block/testdata/.gitignore diff --git a/pkg/experiment/querybackend/block/testdata/block-metas.json b/pkg/experiment/query_backend/block/testdata/block-metas.json similarity index 100% rename from pkg/experiment/querybackend/block/testdata/block-metas.json rename to pkg/experiment/query_backend/block/testdata/block-metas.json diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQPYDC160REPAD2VN88XN/block.bin b/pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQPYDC160REPAD2VN88XN/block.bin similarity index 100% rename from pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQPYDC160REPAD2VN88XN/block.bin rename to pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQPYDC160REPAD2VN88XN/block.bin diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQRGBK8YFWVV8K1MPRRWM/block.bin b/pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQRGBK8YFWVV8K1MPRRWM/block.bin similarity index 100% rename from pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQRGBK8YFWVV8K1MPRRWM/block.bin rename to pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQRGBK8YFWVV8K1MPRRWM/block.bin diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQRTMSCY4VDYBP5N4N5JK/block.bin b/pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQRTMSCY4VDYBP5N4N5JK/block.bin similarity index 100% rename from pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQRTMSCY4VDYBP5N4N5JK/block.bin rename to pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQRTMSCY4VDYBP5N4N5JK/block.bin diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQTJ3PGF7KB39ARR1BX3Y/block.bin b/pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQTJ3PGF7KB39ARR1BX3Y/block.bin similarity index 100% rename from pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQTJ3PGF7KB39ARR1BX3Y/block.bin rename to pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQTJ3PGF7KB39ARR1BX3Y/block.bin diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQV544TF571FDSK2H692P/block.bin b/pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQV544TF571FDSK2H692P/block.bin similarity index 100% rename from pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQV544TF571FDSK2H692P/block.bin rename to pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQV544TF571FDSK2H692P/block.bin diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQX8DYHSEBK7BAQSCJBMG/block.bin b/pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQX8DYHSEBK7BAQSCJBMG/block.bin similarity index 100% rename from pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQX8DYHSEBK7BAQSCJBMG/block.bin rename to pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQX8DYHSEBK7BAQSCJBMG/block.bin diff --git 
a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQYQVZTPZMMJKE7F2XC47/block.bin b/pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQYQVZTPZMMJKE7F2XC47/block.bin similarity index 100% rename from pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQYQVZTPZMMJKE7F2XC47/block.bin rename to pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQYQVZTPZMMJKE7F2XC47/block.bin diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQZPARDJQ779S1JMV0XQA/block.bin b/pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQZPARDJQ779S1JMV0XQA/block.bin similarity index 100% rename from pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJQZPARDJQ779S1JMV0XQA/block.bin rename to pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJQZPARDJQ779S1JMV0XQA/block.bin diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJR0R3NQS23SDADNA6XHCM/block.bin b/pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJR0R3NQS23SDADNA6XHCM/block.bin similarity index 100% rename from pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJR0R3NQS23SDADNA6XHCM/block.bin rename to pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJR0R3NQS23SDADNA6XHCM/block.bin diff --git a/pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJR31PT3X4NDJC4Q2BHWQ1/block.bin b/pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJR31PT3X4NDJC4Q2BHWQ1/block.bin similarity index 100% rename from pkg/experiment/querybackend/block/testdata/segments/1/anonymous/01J2VJR31PT3X4NDJC4Q2BHWQ1/block.bin rename to pkg/experiment/query_backend/block/testdata/segments/1/anonymous/01J2VJR31PT3X4NDJC4Q2BHWQ1/block.bin diff --git a/pkg/experiment/querybackend/block/writer.go b/pkg/experiment/query_backend/block/writer.go similarity index 100% rename from pkg/experiment/querybackend/block/writer.go rename to pkg/experiment/query_backend/block/writer.go diff --git a/pkg/experiment/querybackend/block_reader.go b/pkg/experiment/query_backend/block_reader.go similarity index 90% rename from pkg/experiment/querybackend/block_reader.go rename to pkg/experiment/query_backend/block_reader.go index 29ef4c3c1d..94cda2f27e 100644 --- a/pkg/experiment/querybackend/block_reader.go +++ b/pkg/experiment/query_backend/block_reader.go @@ -1,4 +1,4 @@ -package querybackend +package query_backend import ( "context" @@ -13,8 +13,8 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" - "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" "github.com/grafana/pyroscope/pkg/objstore" "github.com/grafana/pyroscope/pkg/util" ) @@ -64,8 +64,8 @@ func NewBlockReader(logger log.Logger, storage objstore.Bucket) *BlockReader { func (b *BlockReader) Invoke( ctx context.Context, - req *querybackendv1.InvokeRequest, -) (*querybackendv1.InvokeResponse, error) { + req *queryv1.InvokeRequest, +) (*queryv1.InvokeResponse, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "BlockReader.Invoke") defer span.Finish() vr, err := validateRequest(req) @@ -97,13 +97,13 @@ func (b *BlockReader) Invoke( } type request struct { - src *querybackendv1.InvokeRequest + src 
*queryv1.InvokeRequest matchers []*labels.Matcher startTime int64 // Unix nano. endTime int64 // Unix nano. } -func validateRequest(req *querybackendv1.InvokeRequest) (*request, error) { +func validateRequest(req *queryv1.InvokeRequest) (*request, error) { if len(req.Query) == 0 { return nil, fmt.Errorf("no queries provided") } diff --git a/pkg/experiment/querybackend/client/client.go b/pkg/experiment/query_backend/client/client.go similarity index 85% rename from pkg/experiment/querybackend/client/client.go rename to pkg/experiment/query_backend/client/client.go index 677a2269ae..f3942ce6de 100644 --- a/pkg/experiment/querybackend/client/client.go +++ b/pkg/experiment/query_backend/client/client.go @@ -9,12 +9,12 @@ import ( "github.com/opentracing/opentracing-go" "google.golang.org/grpc" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" ) type Client struct { service services.Service - grpcClient querybackendv1.QueryBackendServiceClient + grpcClient queryv1.QueryBackendServiceClient } func New(address string, grpcClientConfig grpcclient.Config) (*Client, error) { @@ -23,7 +23,7 @@ func New(address string, grpcClientConfig grpcclient.Config) (*Client, error) { return nil, err } var c Client - c.grpcClient = querybackendv1.NewQueryBackendServiceClient(conn) + c.grpcClient = queryv1.NewQueryBackendServiceClient(conn) c.service = services.NewIdleService(c.starting, c.stopping) return &c, nil } @@ -47,7 +47,7 @@ func (b *Client) Service() services.Service { return b.service } func (b *Client) starting(context.Context) error { return nil } func (b *Client) stopping(error) error { return nil } -func (b *Client) Invoke(ctx context.Context, req *querybackendv1.InvokeRequest) (*querybackendv1.InvokeResponse, error) { +func (b *Client) Invoke(ctx context.Context, req *queryv1.InvokeRequest) (*queryv1.InvokeResponse, error) { return b.grpcClient.Invoke(ctx, req) } diff --git a/pkg/experiment/querybackend/query.go b/pkg/experiment/query_backend/query.go similarity index 77% rename from pkg/experiment/querybackend/query.go rename to pkg/experiment/query_backend/query.go index 3a56b8f064..6676713a87 100644 --- a/pkg/experiment/querybackend/query.go +++ b/pkg/experiment/query_backend/query.go @@ -1,4 +1,4 @@ -package querybackend +package query_backend import ( "context" @@ -10,8 +10,8 @@ import ( "github.com/opentracing/opentracing-go" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" - "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" ) // TODO(kolesnikovae): We have a procedural definition of our queries, @@ -20,12 +20,12 @@ import ( var ( handlerMutex = new(sync.RWMutex) - queryHandlers = map[querybackendv1.QueryType]queryHandler{} + queryHandlers = map[queryv1.QueryType]queryHandler{} ) -type queryHandler func(*queryContext, *querybackendv1.Query) (*querybackendv1.Report, error) +type queryHandler func(*queryContext, *queryv1.Query) (*queryv1.Report, error) -func registerQueryHandler(t querybackendv1.QueryType, h queryHandler) { +func registerQueryHandler(t queryv1.QueryType, h queryHandler) { handlerMutex.Lock() defer handlerMutex.Unlock() if _, ok := queryHandlers[t]; ok { @@ -34,7 +34,7 @@ func registerQueryHandler(t querybackendv1.QueryType, h 
queryHandler) { queryHandlers[t] = h } -func getQueryHandler(t querybackendv1.QueryType) (queryHandler, error) { +func getQueryHandler(t queryv1.QueryType) (queryHandler, error) { handlerMutex.RLock() defer handlerMutex.RUnlock() handler, ok := queryHandlers[t] @@ -46,10 +46,10 @@ func getQueryHandler(t querybackendv1.QueryType) (queryHandler, error) { var ( depMutex = new(sync.RWMutex) - queryDependencies = map[querybackendv1.QueryType][]block.Section{} + queryDependencies = map[queryv1.QueryType][]block.Section{} ) -func registerQueryDependencies(t querybackendv1.QueryType, deps ...block.Section) { +func registerQueryDependencies(t queryv1.QueryType, deps ...block.Section) { depMutex.Lock() defer depMutex.Unlock() if _, ok := queryDependencies[t]; ok { @@ -59,8 +59,8 @@ func registerQueryDependencies(t querybackendv1.QueryType, deps ...block.Section } func registerQueryType( - qt querybackendv1.QueryType, - rt querybackendv1.ReportType, + qt queryv1.QueryType, + rt queryv1.ReportType, q queryHandler, a aggregatorProvider, deps ...block.Section, @@ -98,7 +98,7 @@ func newQueryContext( } } -func executeQuery(q *queryContext, query *querybackendv1.Query) (r *querybackendv1.Report, err error) { +func executeQuery(q *queryContext, query *queryv1.Query) (r *queryv1.Report, err error) { var span opentracing.Span span, q.ctx = opentracing.StartSpanFromContext(q.ctx, "executeQuery."+strcase.ToCamel(query.QueryType.String())) defer span.Finish() diff --git a/pkg/experiment/querybackend/query_label_names.go b/pkg/experiment/query_backend/query_label_names.go similarity index 67% rename from pkg/experiment/querybackend/query_label_names.go rename to pkg/experiment/query_backend/query_label_names.go index 995c21ef13..08adc132bf 100644 --- a/pkg/experiment/querybackend/query_label_names.go +++ b/pkg/experiment/query_backend/query_label_names.go @@ -1,4 +1,4 @@ -package querybackend +package query_backend import ( "sort" @@ -6,23 +6,23 @@ import ( "github.com/prometheus/prometheus/model/labels" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" - "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" "github.com/grafana/pyroscope/pkg/model" "github.com/grafana/pyroscope/pkg/phlaredb" ) func init() { registerQueryType( - querybackendv1.QueryType_QUERY_LABEL_NAMES, - querybackendv1.ReportType_REPORT_LABEL_NAMES, + queryv1.QueryType_QUERY_LABEL_NAMES, + queryv1.ReportType_REPORT_LABEL_NAMES, queryLabelNames, newLabelNameAggregator, []block.Section{block.SectionTSDB}..., ) } -func queryLabelNames(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Report, error) { +func queryLabelNames(q *queryContext, query *queryv1.Query) (*queryv1.Report, error) { var names []string var err error if len(q.req.matchers) == 0 { @@ -33,8 +33,8 @@ func queryLabelNames(q *queryContext, query *querybackendv1.Query) (*querybacken if err != nil { return nil, err } - resp := &querybackendv1.Report{ - LabelNames: &querybackendv1.LabelNamesReport{ + resp := &queryv1.Report{ + LabelNames: &queryv1.LabelNamesReport{ Query: query.LabelNames.CloneVT(), LabelNames: names, }, @@ -72,15 +72,15 @@ func labelNamesForMatchers(reader phlaredb.IndexReader, matchers []*labels.Match type labelNameAggregator struct { init sync.Once - query *querybackendv1.LabelNamesQuery + query *queryv1.LabelNamesQuery names *model.LabelMerger } -func 
newLabelNameAggregator(*querybackendv1.InvokeRequest) aggregator { +func newLabelNameAggregator(*queryv1.InvokeRequest) aggregator { return new(labelNameAggregator) } -func (m *labelNameAggregator) aggregate(report *querybackendv1.Report) error { +func (m *labelNameAggregator) aggregate(report *queryv1.Report) error { r := report.LabelNames m.init.Do(func() { m.query = r.Query.CloneVT() @@ -90,9 +90,9 @@ func (m *labelNameAggregator) aggregate(report *querybackendv1.Report) error { return nil } -func (m *labelNameAggregator) build() *querybackendv1.Report { - return &querybackendv1.Report{ - LabelNames: &querybackendv1.LabelNamesReport{ +func (m *labelNameAggregator) build() *queryv1.Report { + return &queryv1.Report{ + LabelNames: &queryv1.LabelNamesReport{ Query: m.query, LabelNames: m.names.LabelNames(), }, diff --git a/pkg/experiment/querybackend/query_label_values.go b/pkg/experiment/query_backend/query_label_values.go similarity index 69% rename from pkg/experiment/querybackend/query_label_values.go rename to pkg/experiment/query_backend/query_label_values.go index 5f273092d7..7fe5569314 100644 --- a/pkg/experiment/querybackend/query_label_values.go +++ b/pkg/experiment/query_backend/query_label_values.go @@ -1,4 +1,4 @@ -package querybackend +package query_backend import ( "errors" @@ -8,23 +8,23 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" - "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" "github.com/grafana/pyroscope/pkg/model" "github.com/grafana/pyroscope/pkg/phlaredb" ) func init() { registerQueryType( - querybackendv1.QueryType_QUERY_LABEL_VALUES, - querybackendv1.ReportType_REPORT_LABEL_VALUES, + queryv1.QueryType_QUERY_LABEL_VALUES, + queryv1.ReportType_REPORT_LABEL_VALUES, queryLabelValues, newLabelValueAggregator, []block.Section{block.SectionTSDB}..., ) } -func queryLabelValues(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Report, error) { +func queryLabelValues(q *queryContext, query *queryv1.Query) (*queryv1.Report, error) { var values []string var err error if len(q.req.matchers) == 0 { @@ -35,8 +35,8 @@ func queryLabelValues(q *queryContext, query *querybackendv1.Query) (*querybacke if err != nil { return nil, err } - resp := &querybackendv1.Report{ - LabelValues: &querybackendv1.LabelValuesReport{ + resp := &queryv1.Report{ + LabelValues: &queryv1.LabelValuesReport{ Query: query.LabelValues.CloneVT(), LabelValues: values, }, @@ -75,15 +75,15 @@ func labelValuesForMatchers(reader phlaredb.IndexReader, name string, matchers [ type labelValueAggregator struct { init sync.Once - query *querybackendv1.LabelValuesQuery + query *queryv1.LabelValuesQuery values *model.LabelMerger } -func newLabelValueAggregator(*querybackendv1.InvokeRequest) aggregator { +func newLabelValueAggregator(*queryv1.InvokeRequest) aggregator { return new(labelValueAggregator) } -func (m *labelValueAggregator) aggregate(report *querybackendv1.Report) error { +func (m *labelValueAggregator) aggregate(report *queryv1.Report) error { r := report.LabelValues m.init.Do(func() { m.query = r.Query.CloneVT() @@ -93,9 +93,9 @@ func (m *labelValueAggregator) aggregate(report *querybackendv1.Report) error { return nil } -func (m *labelValueAggregator) build() *querybackendv1.Report { - return 
&querybackendv1.Report{ - LabelValues: &querybackendv1.LabelValuesReport{ +func (m *labelValueAggregator) build() *queryv1.Report { + return &queryv1.Report{ + LabelValues: &queryv1.LabelValuesReport{ Query: m.query, LabelValues: m.values.LabelValues(), }, diff --git a/pkg/experiment/querybackend/queryplan/query_plan.go b/pkg/experiment/query_backend/query_plan/query_plan.go similarity index 97% rename from pkg/experiment/querybackend/queryplan/query_plan.go rename to pkg/experiment/query_backend/query_plan/query_plan.go index bc33f3016e..055f582d2f 100644 --- a/pkg/experiment/querybackend/queryplan/query_plan.go +++ b/pkg/experiment/query_backend/query_plan/query_plan.go @@ -1,4 +1,4 @@ -package queryplan +package query_plan import ( "fmt" @@ -9,7 +9,7 @@ import ( "unsafe" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" "github.com/grafana/pyroscope/pkg/iter" ) @@ -53,7 +53,7 @@ type node struct { len uint32 } -func Open(p *querybackendv1.QueryPlan) *QueryPlan { +func Open(p *queryv1.QueryPlan) *QueryPlan { if len(p.Blocks) == 0 { return new(QueryPlan) } @@ -233,8 +233,8 @@ func (n *Node) Plan() *QueryPlan { } } -func (p *QueryPlan) Proto() *querybackendv1.QueryPlan { - return &querybackendv1.QueryPlan{ +func (p *QueryPlan) Proto() *queryv1.QueryPlan { + return &queryv1.QueryPlan{ Graph: unsafe.Slice((*uint32)(unsafe.Pointer(unsafe.SliceData(p.nodes))), len(p.nodes)*3), Blocks: p.blocks, } diff --git a/pkg/experiment/querybackend/queryplan/query_plan_test.go b/pkg/experiment/query_backend/query_plan/query_plan_test.go similarity index 94% rename from pkg/experiment/querybackend/queryplan/query_plan_test.go rename to pkg/experiment/query_backend/query_plan/query_plan_test.go index 488e2f7ce6..fdeedd720c 100644 --- a/pkg/experiment/querybackend/queryplan/query_plan_test.go +++ b/pkg/experiment/query_backend/query_plan/query_plan_test.go @@ -1,4 +1,4 @@ -package queryplan +package query_plan import ( "bytes" @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" "github.com/grafana/pyroscope/pkg/iter" ) @@ -69,7 +69,7 @@ read [id:"01J2JZ0A7MPZMR0R745HAZD1S9" id:"01J2JZ0RY9WCA01S322EG201R8"] ` p := Build(blocks, 2, 5).Proto() - n := []*querybackendv1.QueryPlan{p} + n := []*queryv1.QueryPlan{p} var x *QueryPlan for len(n) > 0 { x, n = Open(n[0]), n[1:] @@ -103,7 +103,7 @@ func Test_Plan_skip_top_merge(t *testing.T) { expected := `[id:"01J2JY1K5J4T2WNDV05CHVFCA9" id:"01J2JY21VVYYV4PMDGK4TVMZ6H"]` p := Build(blocks, 2, 5).Proto() - n := []*querybackendv1.QueryPlan{p} + n := []*queryv1.QueryPlan{p} var x *QueryPlan for len(n) > 0 { x, n = Open(n[0]), n[1:] diff --git a/pkg/experiment/querybackend/queryplan/testdata/plan.txt b/pkg/experiment/query_backend/query_plan/testdata/plan.txt similarity index 100% rename from pkg/experiment/querybackend/queryplan/testdata/plan.txt rename to pkg/experiment/query_backend/query_plan/testdata/plan.txt diff --git a/pkg/experiment/query_backend/query_pprof.go b/pkg/experiment/query_backend/query_pprof.go new file mode 100644 index 0000000000..ff29a21535 --- /dev/null +++ b/pkg/experiment/query_backend/query_pprof.go @@ -0,0 +1,97 @@ +package query_backend + +import 
( + "sync" + + "github.com/grafana/dskit/runutil" + + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" + parquetquery "github.com/grafana/pyroscope/pkg/phlaredb/query" + v1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1" + "github.com/grafana/pyroscope/pkg/phlaredb/symdb" + "github.com/grafana/pyroscope/pkg/pprof" +) + +func init() { + registerQueryType( + queryv1.QueryType_QUERY_PPROF, + queryv1.ReportType_REPORT_PPROF, + queryPprof, + newPprofAggregator, + []block.Section{ + block.SectionTSDB, + block.SectionProfiles, + block.SectionSymbols, + }..., + ) +} + +func queryPprof(q *queryContext, query *queryv1.Query) (*queryv1.Report, error) { + entries, err := profileEntryIterator(q) + if err != nil { + return nil, err + } + defer runutil.CloseWithErrCapture(&err, entries, "failed to close profile entry iterator") + + var columns v1.SampleColumns + if err = columns.Resolve(q.ds.Profiles().Schema()); err != nil { + return nil, err + } + + profiles := parquetquery.NewRepeatedRowIterator(q.ctx, entries, q.ds.Profiles().RowGroups(), + columns.StacktraceID.ColumnIndex, + columns.Value.ColumnIndex) + defer runutil.CloseWithErrCapture(&err, profiles, "failed to close profile stream") + + resolver := symdb.NewResolver(q.ctx, q.ds.Symbols(), + // TODO(kolesnikovae): Stack trace selector. + symdb.WithResolverMaxNodes(query.Pprof.MaxNodes)) + defer resolver.Release() + + for profiles.Next() { + p := profiles.At() + resolver.AddSamplesFromParquetRow(p.Row.Partition, p.Values[0], p.Values[1]) + } + if err = profiles.Err(); err != nil { + return nil, err + } + + profile, err := resolver.Pprof() + if err != nil { + return nil, err + } + + resp := &queryv1.Report{ + Pprof: &queryv1.PprofReport{ + Query: query.Pprof.CloneVT(), + Pprof: pprof.MustMarshal(profile, true), + }, + } + return resp, nil +} + +type pprofAggregator struct { + init sync.Once + query *queryv1.PprofQuery + profile pprof.ProfileMerge +} + +func newPprofAggregator(*queryv1.InvokeRequest) aggregator { return new(pprofAggregator) } + +func (a *pprofAggregator) aggregate(report *queryv1.Report) error { + r := report.Pprof + a.init.Do(func() { + a.query = r.Query.CloneVT() + }) + return a.profile.MergeBytes(r.Pprof) +} + +func (a *pprofAggregator) build() *queryv1.Report { + return &queryv1.Report{ + Pprof: &queryv1.PprofReport{ + Query: a.query, + Pprof: pprof.MustMarshal(a.profile.Profile(), true), + }, + } +} diff --git a/pkg/experiment/querybackend/query_profile_entry.go b/pkg/experiment/query_backend/query_profile_entry.go similarity index 99% rename from pkg/experiment/querybackend/query_profile_entry.go rename to pkg/experiment/query_backend/query_profile_entry.go index f6b9ef578b..cf76362dca 100644 --- a/pkg/experiment/querybackend/query_profile_entry.go +++ b/pkg/experiment/query_backend/query_profile_entry.go @@ -1,4 +1,4 @@ -package querybackend +package query_backend import ( "github.com/parquet-go/parquet-go" diff --git a/pkg/experiment/querybackend/query_series_labels.go b/pkg/experiment/query_backend/query_series_labels.go similarity index 68% rename from pkg/experiment/querybackend/query_series_labels.go rename to pkg/experiment/query_backend/query_series_labels.go index 1fa1e2a3a4..647c4b3cc4 100644 --- a/pkg/experiment/querybackend/query_series_labels.go +++ b/pkg/experiment/query_backend/query_series_labels.go @@ -1,13 +1,13 @@ -package querybackend +package query_backend import ( "sync" 
"github.com/prometheus/prometheus/model/labels" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" - "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" "github.com/grafana/pyroscope/pkg/model" "github.com/grafana/pyroscope/pkg/phlaredb" "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index" @@ -15,15 +15,15 @@ import ( func init() { registerQueryType( - querybackendv1.QueryType_QUERY_SERIES_LABELS, - querybackendv1.ReportType_REPORT_SERIES_LABELS, + queryv1.QueryType_QUERY_SERIES_LABELS, + queryv1.ReportType_REPORT_SERIES_LABELS, querySeriesLabels, newSeriesLabelsAggregator, []block.Section{block.SectionTSDB}..., ) } -func querySeriesLabels(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Report, error) { +func querySeriesLabels(q *queryContext, query *queryv1.Query) (*queryv1.Report, error) { postings, err := getPostings(q.ds.Index(), q.req.matchers...) if err != nil { return nil, err @@ -47,8 +47,8 @@ func querySeriesLabels(q *queryContext, query *querybackendv1.Query) (*queryback series[i] = &typesv1.Labels{Labels: s} i++ } - resp := &querybackendv1.Report{ - SeriesLabels: &querybackendv1.SeriesLabelsReport{ + resp := &queryv1.Report{ + SeriesLabels: &queryv1.SeriesLabelsReport{ Query: query.SeriesLabels.CloneVT(), SeriesLabels: series, }, @@ -66,15 +66,15 @@ func getPostings(reader phlaredb.IndexReader, matchers ...*labels.Matcher) (inde type seriesLabelsAggregator struct { init sync.Once - query *querybackendv1.SeriesLabelsQuery + query *queryv1.SeriesLabelsQuery series *model.LabelMerger } -func newSeriesLabelsAggregator(*querybackendv1.InvokeRequest) aggregator { +func newSeriesLabelsAggregator(*queryv1.InvokeRequest) aggregator { return new(seriesLabelsAggregator) } -func (a *seriesLabelsAggregator) aggregate(report *querybackendv1.Report) error { +func (a *seriesLabelsAggregator) aggregate(report *queryv1.Report) error { r := report.SeriesLabels a.init.Do(func() { a.query = r.Query.CloneVT() @@ -84,9 +84,9 @@ func (a *seriesLabelsAggregator) aggregate(report *querybackendv1.Report) error return nil } -func (a *seriesLabelsAggregator) build() *querybackendv1.Report { - return &querybackendv1.Report{ - SeriesLabels: &querybackendv1.SeriesLabelsReport{ +func (a *seriesLabelsAggregator) build() *queryv1.Report { + return &queryv1.Report{ + SeriesLabels: &queryv1.SeriesLabelsReport{ Query: a.query, SeriesLabels: a.series.SeriesLabels(), }, diff --git a/pkg/experiment/querybackend/query_time_series.go b/pkg/experiment/query_backend/query_time_series.go similarity index 75% rename from pkg/experiment/querybackend/query_time_series.go rename to pkg/experiment/query_backend/query_time_series.go index 7a09ee6855..740348ed3e 100644 --- a/pkg/experiment/querybackend/query_time_series.go +++ b/pkg/experiment/query_backend/query_time_series.go @@ -1,4 +1,4 @@ -package querybackend +package query_backend import ( "strings" @@ -7,9 +7,9 @@ import ( "github.com/grafana/dskit/runutil" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" - "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" phlaremodel 
"github.com/grafana/pyroscope/pkg/model" parquetquery "github.com/grafana/pyroscope/pkg/phlaredb/query" schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1" @@ -17,8 +17,8 @@ import ( func init() { registerQueryType( - querybackendv1.QueryType_QUERY_TIME_SERIES, - querybackendv1.ReportType_REPORT_TIME_SERIES, + queryv1.QueryType_QUERY_TIME_SERIES, + queryv1.ReportType_REPORT_TIME_SERIES, queryTimeSeries, newTimeSeriesAggregator, []block.Section{ @@ -28,7 +28,7 @@ func init() { ) } -func queryTimeSeries(q *queryContext, query *querybackendv1.Query) (r *querybackendv1.Report, err error) { +func queryTimeSeries(q *queryContext, query *queryv1.Query) (r *queryv1.Report, err error) { entries, err := profileEntryIterator(q, query.TimeSeries.GroupBy...) if err != nil { return nil, err @@ -57,8 +57,8 @@ func queryTimeSeries(q *queryContext, query *querybackendv1.Query) (r *queryback return nil, err } - resp := &querybackendv1.Report{ - TimeSeries: &querybackendv1.TimeSeriesReport{ + resp := &queryv1.Report{ + TimeSeries: &queryv1.TimeSeriesReport{ Query: query.TimeSeries.CloneVT(), TimeSeries: builder.Build(), }, @@ -71,18 +71,18 @@ type timeSeriesAggregator struct { init sync.Once startTime int64 endTime int64 - query *querybackendv1.TimeSeriesQuery + query *queryv1.TimeSeriesQuery series *phlaremodel.TimeSeriesMerger } -func newTimeSeriesAggregator(req *querybackendv1.InvokeRequest) aggregator { +func newTimeSeriesAggregator(req *queryv1.InvokeRequest) aggregator { return &timeSeriesAggregator{ startTime: req.StartTime, endTime: req.EndTime, } } -func (a *timeSeriesAggregator) aggregate(report *querybackendv1.Report) error { +func (a *timeSeriesAggregator) aggregate(report *queryv1.Report) error { r := report.TimeSeries a.init.Do(func() { a.series = phlaremodel.NewTimeSeriesMerger(true) @@ -92,15 +92,15 @@ func (a *timeSeriesAggregator) aggregate(report *querybackendv1.Report) error { return nil } -func (a *timeSeriesAggregator) build() *querybackendv1.Report { +func (a *timeSeriesAggregator) build() *queryv1.Report { // TODO(kolesnikovae): Average aggregation should be implemented in // the way that it can be distributed (count + sum), and should be done // at "aggregate" call. 
sum := typesv1.TimeSeriesAggregationType_TIME_SERIES_AGGREGATION_TYPE_SUM stepMilli := time.Duration(a.query.GetStep() * float64(time.Second)).Milliseconds() seriesIterator := phlaremodel.NewTimeSeriesMergeIterator(a.series.TimeSeries()) - return &querybackendv1.Report{ - TimeSeries: &querybackendv1.TimeSeriesReport{ + return &queryv1.Report{ + TimeSeries: &queryv1.TimeSeriesReport{ Query: a.query, TimeSeries: phlaremodel.RangeSeries( seriesIterator, diff --git a/pkg/experiment/querybackend/query_tree.go b/pkg/experiment/query_backend/query_tree.go similarity index 67% rename from pkg/experiment/querybackend/query_tree.go rename to pkg/experiment/query_backend/query_tree.go index b440244342..b246e3f1fd 100644 --- a/pkg/experiment/querybackend/query_tree.go +++ b/pkg/experiment/query_backend/query_tree.go @@ -1,12 +1,12 @@ -package querybackend +package query_backend import ( "sync" "github.com/grafana/dskit/runutil" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" - "github.com/grafana/pyroscope/pkg/experiment/querybackend/block" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" "github.com/grafana/pyroscope/pkg/model" parquetquery "github.com/grafana/pyroscope/pkg/phlaredb/query" v1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1" @@ -15,8 +15,8 @@ import ( func init() { registerQueryType( - querybackendv1.QueryType_QUERY_TREE, - querybackendv1.ReportType_REPORT_TREE, + queryv1.QueryType_QUERY_TREE, + queryv1.ReportType_REPORT_TREE, queryTree, newTreeAggregator, []block.Section{ @@ -27,7 +27,7 @@ func init() { ) } -func queryTree(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Report, error) { +func queryTree(q *queryContext, query *queryv1.Query) (*queryv1.Report, error) { entries, err := profileEntryIterator(q) if err != nil { return nil, err @@ -44,8 +44,10 @@ func queryTree(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Re columns.Value.ColumnIndex) defer runutil.CloseWithErrCapture(&err, profiles, "failed to close profile stream") - resolver := symdb.NewResolver(q.ctx, q.ds.Symbols()) + resolver := symdb.NewResolver(q.ctx, q.ds.Symbols(), + symdb.WithResolverMaxNodes(query.Tree.GetMaxNodes())) defer resolver.Release() + for profiles.Next() { p := profiles.At() resolver.AddSamplesFromParquetRow(p.Row.Partition, p.Values[0], p.Values[1]) @@ -59,8 +61,8 @@ func queryTree(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Re return nil, err } - resp := &querybackendv1.Report{ - Tree: &querybackendv1.TreeReport{ + resp := &queryv1.Report{ + Tree: &queryv1.TreeReport{ Query: query.Tree.CloneVT(), Tree: tree.Bytes(query.Tree.GetMaxNodes()), }, @@ -70,13 +72,13 @@ func queryTree(q *queryContext, query *querybackendv1.Query) (*querybackendv1.Re type treeAggregator struct { init sync.Once - query *querybackendv1.TreeQuery + query *queryv1.TreeQuery tree *model.TreeMerger } -func newTreeAggregator(*querybackendv1.InvokeRequest) aggregator { return new(treeAggregator) } +func newTreeAggregator(*queryv1.InvokeRequest) aggregator { return new(treeAggregator) } -func (a *treeAggregator) aggregate(report *querybackendv1.Report) error { +func (a *treeAggregator) aggregate(report *queryv1.Report) error { r := report.Tree a.init.Do(func() { a.tree = model.NewTreeMerger() @@ -85,9 +87,9 @@ func (a *treeAggregator) aggregate(report *querybackendv1.Report) error { return a.tree.MergeTreeBytes(r.Tree) } -func (a *treeAggregator) build() 
*querybackendv1.Report { - return &querybackendv1.Report{ - Tree: &querybackendv1.TreeReport{ +func (a *treeAggregator) build() *queryv1.Report { + return &queryv1.Report{ + Tree: &queryv1.TreeReport{ Query: a.query, Tree: a.tree.Tree().Bytes(a.query.GetMaxNodes()), }, diff --git a/pkg/experiment/querybackend/report_aggregator.go b/pkg/experiment/query_backend/report_aggregator.go similarity index 62% rename from pkg/experiment/querybackend/report_aggregator.go rename to pkg/experiment/query_backend/report_aggregator.go index 44be8d83f9..27f53a5bcf 100644 --- a/pkg/experiment/querybackend/report_aggregator.go +++ b/pkg/experiment/query_backend/report_aggregator.go @@ -1,29 +1,29 @@ -package querybackend +package query_backend import ( "fmt" "sync" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" ) var ( aggregatorMutex = new(sync.RWMutex) - aggregators = map[querybackendv1.ReportType]aggregatorProvider{} - queryReportType = map[querybackendv1.QueryType]querybackendv1.ReportType{} + aggregators = map[queryv1.ReportType]aggregatorProvider{} + queryReportType = map[queryv1.QueryType]queryv1.ReportType{} ) -type aggregatorProvider func(*querybackendv1.InvokeRequest) aggregator +type aggregatorProvider func(*queryv1.InvokeRequest) aggregator type aggregator interface { // The method is called concurrently. - aggregate(*querybackendv1.Report) error + aggregate(*queryv1.Report) error // build the aggregation result. It's guaranteed that aggregate() // was called at least once before report() is called. - build() *querybackendv1.Report + build() *queryv1.Report } -func registerAggregator(t querybackendv1.ReportType, ap aggregatorProvider) { +func registerAggregator(t queryv1.ReportType, ap aggregatorProvider) { aggregatorMutex.Lock() defer aggregatorMutex.Unlock() _, ok := aggregators[t] @@ -33,7 +33,7 @@ func registerAggregator(t querybackendv1.ReportType, ap aggregatorProvider) { aggregators[t] = ap } -func getAggregator(r *querybackendv1.InvokeRequest, x *querybackendv1.Report) (aggregator, error) { +func getAggregator(r *queryv1.InvokeRequest, x *queryv1.Report) (aggregator, error) { aggregatorMutex.RLock() defer aggregatorMutex.RUnlock() a, ok := aggregators[x.ReportType] @@ -43,7 +43,7 @@ func getAggregator(r *querybackendv1.InvokeRequest, x *querybackendv1.Report) (a return a(r), nil } -func registerQueryReportType(q querybackendv1.QueryType, r querybackendv1.ReportType) { +func registerQueryReportType(q queryv1.QueryType, r queryv1.ReportType) { aggregatorMutex.Lock() defer aggregatorMutex.Unlock() v, ok := queryReportType[q] @@ -53,7 +53,7 @@ func registerQueryReportType(q querybackendv1.QueryType, r querybackendv1.Report queryReportType[q] = r } -func QueryReportType(q querybackendv1.QueryType) querybackendv1.ReportType { +func QueryReportType(q queryv1.QueryType) queryv1.ReportType { aggregatorMutex.RLock() defer aggregatorMutex.RUnlock() r, ok := queryReportType[q] @@ -64,21 +64,21 @@ func QueryReportType(q querybackendv1.QueryType) querybackendv1.ReportType { } type reportAggregator struct { - request *querybackendv1.InvokeRequest + request *queryv1.InvokeRequest sm sync.Mutex - staged map[querybackendv1.ReportType]*querybackendv1.Report - aggregators map[querybackendv1.ReportType]aggregator + staged map[queryv1.ReportType]*queryv1.Report + aggregators map[queryv1.ReportType]aggregator } -func newAggregator(request *querybackendv1.InvokeRequest) *reportAggregator { +func 
newAggregator(request *queryv1.InvokeRequest) *reportAggregator { return &reportAggregator{ request: request, - staged: make(map[querybackendv1.ReportType]*querybackendv1.Report), - aggregators: make(map[querybackendv1.ReportType]aggregator), + staged: make(map[queryv1.ReportType]*queryv1.Report), + aggregators: make(map[queryv1.ReportType]aggregator), } } -func (ra *reportAggregator) aggregateResponse(resp *querybackendv1.InvokeResponse, err error) error { +func (ra *reportAggregator) aggregateResponse(resp *queryv1.InvokeResponse, err error) error { if err != nil { return err } @@ -90,7 +90,7 @@ func (ra *reportAggregator) aggregateResponse(resp *querybackendv1.InvokeRespons return nil } -func (ra *reportAggregator) aggregateReport(r *querybackendv1.Report) (err error) { +func (ra *reportAggregator) aggregateReport(r *queryv1.Report) (err error) { if r == nil { return nil } @@ -118,7 +118,7 @@ func (ra *reportAggregator) aggregateReport(r *querybackendv1.Report) (err error return ra.aggregateReportNoCheck(r) } -func (ra *reportAggregator) aggregateReportNoCheck(report *querybackendv1.Report) (err error) { +func (ra *reportAggregator) aggregateReportNoCheck(report *queryv1.Report) (err error) { a, ok := ra.aggregators[report.ReportType] if !ok { a, err = getAggregator(ra.request, report) @@ -141,15 +141,15 @@ func (ra *reportAggregator) aggregateStaged() error { return nil } -func (ra *reportAggregator) response() (*querybackendv1.InvokeResponse, error) { +func (ra *reportAggregator) response() (*queryv1.InvokeResponse, error) { if err := ra.aggregateStaged(); err != nil { return nil, err } - reports := make([]*querybackendv1.Report, 0, len(ra.staged)) + reports := make([]*queryv1.Report, 0, len(ra.staged)) for t, a := range ra.aggregators { r := a.build() r.ReportType = t reports = append(reports, r) } - return &querybackendv1.InvokeResponse{Reports: reports}, nil + return &queryv1.InvokeResponse{Reports: reports}, nil } diff --git a/pkg/experiment/queryfrontend/frontend_meta.go b/pkg/experiment/queryfrontend/frontend_meta.go deleted file mode 100644 index 8917aa430f..0000000000 --- a/pkg/experiment/queryfrontend/frontend_meta.go +++ /dev/null @@ -1,188 +0,0 @@ -package queryfrontend - -import ( - "context" - "fmt" - "math/rand" - "slices" - "strings" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/promql/parser" - - metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - querybackendv1 "github.com/grafana/pyroscope/api/gen/proto/go/querybackend/v1" - metastoreclient "github.com/grafana/pyroscope/pkg/experiment/metastore/client" - "github.com/grafana/pyroscope/pkg/experiment/querybackend" - querybackendclient "github.com/grafana/pyroscope/pkg/experiment/querybackend/client" - "github.com/grafana/pyroscope/pkg/experiment/querybackend/queryplan" - phlaremodel "github.com/grafana/pyroscope/pkg/model" -) - -func ListMetadata( - ctx context.Context, - client *metastoreclient.Client, - logger log.Logger, - tenants []string, - startTime, endTime int64, - query string, -) ([]*metastorev1.BlockMeta, error) { - _ = level.Info(logger).Log("msg", "listing metadata", - "tenants", strings.Join(tenants, ","), - "start", startTime, - "end", endTime, - "query", query, - ) - resp, err := client.QueryMetadata(ctx, &metastorev1.QueryMetadataRequest{ - TenantId: tenants, - StartTime: startTime, - EndTime: endTime, - Query: query, - }) - if err != nil { - // TODO: Not sure if we want to pass it 
through - return nil, err - } - // TODO: Metrics - printStats(logger, resp.Blocks) - return resp.Blocks, nil -} - -func printStats(logger log.Logger, blocks []*metastorev1.BlockMeta) { - type blockMetaStats struct { - level uint32 - minTime int64 - maxTime int64 - size uint64 - count int - } - m := make(map[uint32]*blockMetaStats) - for _, b := range blocks { - s, ok := m[b.CompactionLevel] - if !ok { - s = &blockMetaStats{level: b.CompactionLevel} - m[b.CompactionLevel] = s - } - for _, x := range b.Datasets { - s.size += x.Size - } - s.count++ - } - sorted := make([]*blockMetaStats, 0, len(m)) - for _, s := range m { - sorted = append(sorted, s) - } - slices.SortFunc(sorted, func(a, b *blockMetaStats) int { - return int(a.level - b.level) - }) - fields := make([]interface{}, 0, 4+len(sorted)*2) - fields = append(fields, "msg", "block metadata list", "blocks_total", fmt.Sprint(len(blocks))) - for _, s := range sorted { - fields = append(fields, - fmt.Sprintf("l%d_blocks", s.level), fmt.Sprint(s.count), - fmt.Sprintf("l%d_size", s.level), fmt.Sprint(s.size), - ) - } - _ = level.Info(logger).Log(fields...) -} - -var xrand = rand.New(rand.NewSource(4349676827832284783)) - -func Query( - ctx context.Context, - startTime, endTime int64, - tenants []string, - labelSelector string, - q *querybackendv1.Query, - mc *metastoreclient.Client, - qc *querybackendclient.Client, - logger log.Logger, -) (*querybackendv1.Report, error) { - blocks, err := ListMetadata(ctx, mc, logger, tenants, startTime, endTime, labelSelector) - if err != nil { - return nil, err - } - if len(blocks) == 0 { - return nil, nil - } - // Randomize the order of blocks to avoid hotspots. - xrand.Shuffle(len(blocks), func(i, j int) { - blocks[i], blocks[j] = blocks[j], blocks[i] - }) - // TODO: Params. - p := queryplan.Build(blocks, 2, 10) - resp, err := qc.Invoke(ctx, &querybackendv1.InvokeRequest{ - Tenant: tenants, - StartTime: startTime, - EndTime: endTime, - LabelSelector: labelSelector, - Options: &querybackendv1.InvokeOptions{}, - QueryPlan: p.Proto(), - Query: []*querybackendv1.Query{q}, - }) - if err != nil { - return nil, err - } - return findReport(querybackend.QueryReportType(q.QueryType), resp.Reports), nil -} - -func BuildLabelSelectorFromMatchers(matchers []string) (string, error) { - parsed, err := parseMatchers(matchers) - if err != nil { - return "", fmt.Errorf("parsing label selector: %w", err) - } - return matchersToLabelSelector(parsed), nil -} - -func BuildLabelSelectorWithProfileType(labelSelector, profileTypeID string) (string, error) { - matchers, err := parser.ParseMetricSelector(labelSelector) - if err != nil { - return "", fmt.Errorf("parsing label selector %q: %w", labelSelector, err) - } - profileType, err := phlaremodel.ParseProfileTypeSelector(profileTypeID) - if err != nil { - return "", fmt.Errorf("parsing profile type ID %q: %w", profileTypeID, err) - } - matchers = append(matchers, phlaremodel.SelectorFromProfileType(profileType)) - return matchersToLabelSelector(matchers), nil -} - -func parseMatchers(matchers []string) ([]*labels.Matcher, error) { - parsed := make([]*labels.Matcher, 0, len(matchers)) - for _, m := range matchers { - s, err := parser.ParseMetricSelector(m) - if err != nil { - return nil, fmt.Errorf("failed to parse label selector %q: %w", s, err) - } - parsed = append(parsed, s...) 
- } - return parsed, nil -} - -func matchersToLabelSelector(matchers []*labels.Matcher) string { - var q strings.Builder - q.WriteByte('{') - for i, m := range matchers { - if i > 0 { - q.WriteByte(',') - } - q.WriteString(m.Name) - q.WriteString(m.Type.String()) - q.WriteByte('"') - q.WriteString(m.Value) - q.WriteByte('"') - } - q.WriteByte('}') - return q.String() -} - -func findReport(r querybackendv1.ReportType, reports []*querybackendv1.Report) *querybackendv1.Report { - for _, x := range reports { - if x.ReportType == r { - return x - } - } - return nil -} diff --git a/pkg/experiment/queryfrontend/frontend_profile_types.go b/pkg/experiment/queryfrontend/frontend_profile_types.go deleted file mode 100644 index da4b760fab..0000000000 --- a/pkg/experiment/queryfrontend/frontend_profile_types.go +++ /dev/null @@ -1,155 +0,0 @@ -package queryfrontend - -import ( - "context" - "slices" - "sort" - - "connectrpc.com/connect" - "github.com/go-kit/log" - - querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" - typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" - metastoreclient "github.com/grafana/pyroscope/pkg/experiment/metastore/client" - phlaremodel "github.com/grafana/pyroscope/pkg/model" -) - -var profileTypeLabels2 = []string{ - "__profile_type__", - "service_name", -} - -var profileTypeLabels5 = []string{ - "__name__", - "__profile_type__", - "__type__", - "pyroscope_app", - "service_name", -} - -func IsProfileTypeQuery(labels, matchers []string) bool { - if len(matchers) > 0 { - return false - } - var s []string - switch len(labels) { - case 2: - s = profileTypeLabels2 - case 5: - s = profileTypeLabels5 - default: - return false - } - sort.Strings(labels) - return slices.Compare(s, labels) == 0 -} - -func ListProfileTypesFromMetadataAsSeriesLabels( - ctx context.Context, - client *metastoreclient.Client, - logger log.Logger, - tenants []string, - startTime, endTime int64, - labels []string, - -) (*connect.Response[querierv1.SeriesResponse], error) { - resp, err := listProfileTypesFromMetadata(ctx, client, logger, tenants, startTime, endTime) - if err != nil { - return nil, err - } - return connect.NewResponse(&querierv1.SeriesResponse{ - LabelsSet: resp.buildSeriesLabels(labels), - }), nil -} - -func listProfileTypesFromMetadata( - ctx context.Context, - client *metastoreclient.Client, - logger log.Logger, - tenants []string, - startTime, endTime int64, -) (*ptypes, error) { - metas, err := ListMetadata(ctx, client, logger, tenants, startTime, endTime, "{}") - if err != nil { - return nil, err - } - p := newProfileTypesResponseBuilder(len(metas) * 8) - for _, m := range metas { - for _, s := range m.Datasets { - p.addServiceProfileTypes(s.Name, s.ProfileTypes...) 
- } - } - return p, nil -} - -type ptypes struct { - services map[string]map[string]struct{} -} - -func newProfileTypesResponseBuilder(size int) *ptypes { - return &ptypes{ - services: make(map[string]map[string]struct{}, size), - } -} - -func (p *ptypes) addServiceProfileTypes(s string, types ...string) { - sp, ok := p.services[s] - if !ok { - sp = make(map[string]struct{}, len(types)) - p.services[s] = sp - } - for _, t := range types { - sp[t] = struct{}{} - } -} - -func (p *ptypes) buildSeriesLabels(names []string) (labels []*typesv1.Labels) { - switch len(names) { - case 2: - labels = p.buildSeriesLabels2() - case 5: - labels = p.buildSeriesLabels5() - default: - panic("bug: invalid request: expected 2 or 5 label names") - } - slices.SortFunc(labels, func(a, b *typesv1.Labels) int { - return phlaremodel.CompareLabelPairs(a.Labels, b.Labels) - }) - return labels -} - -func (p *ptypes) buildSeriesLabels2() []*typesv1.Labels { - labels := make([]*typesv1.Labels, 0, len(p.services)*4) - for n, types := range p.services { - for t := range types { - labels = append(labels, &typesv1.Labels{ - Labels: []*typesv1.LabelPair{ - {Name: "__profile_type__", Value: t}, - {Name: "service_name", Value: n}, - }, - }) - } - } - return labels -} - -func (p *ptypes) buildSeriesLabels5() []*typesv1.Labels { - labels := make([]*typesv1.Labels, 0, len(p.services)*4) - for n, types := range p.services { - for t := range types { - pt, err := phlaremodel.ParseProfileTypeSelector(t) - if err != nil { - panic(err) - } - labels = append(labels, &typesv1.Labels{ - Labels: []*typesv1.LabelPair{ - {Name: "__profile_type__", Value: t}, - {Name: "service_name", Value: n}, - {Name: "__name__", Value: pt.Name}, - {Name: "__type__", Value: pt.SampleType}, - }, - }) - } - } - return labels -} diff --git a/pkg/frontend/frontend.go b/pkg/frontend/frontend.go index a55ae8f1b3..445a8e7de4 100644 --- a/pkg/frontend/frontend.go +++ b/pkg/frontend/frontend.go @@ -80,6 +80,7 @@ func (cfg *Config) Validate() error { type Frontend struct { services.Service connectgrpc.GRPCRoundTripper + frontendpb.UnimplementedFrontendForQuerierServer cfg Config log log.Logger @@ -93,7 +94,6 @@ type Frontend struct { schedulerWorkers *frontendSchedulerWorkers schedulerWorkersWatcher *services.FailureWatcher requests *requestsInProgress - frontendpb.UnimplementedFrontendForQuerierServer } type Limits interface { diff --git a/pkg/frontend/frontend_analyze_query.go b/pkg/frontend/frontend_analyze_query.go index 531c556747..e6caeeb9e7 100644 --- a/pkg/frontend/frontend_analyze_query.go +++ b/pkg/frontend/frontend_analyze_query.go @@ -15,10 +15,10 @@ import ( "github.com/grafana/pyroscope/pkg/validation" ) -func (f *Frontend) AnalyzeQuery(ctx context.Context, - c *connect.Request[querierv1.AnalyzeQueryRequest]) ( - *connect.Response[querierv1.AnalyzeQueryResponse], error, -) { +func (f *Frontend) AnalyzeQuery( + ctx context.Context, + c *connect.Request[querierv1.AnalyzeQueryRequest], +) (*connect.Response[querierv1.AnalyzeQueryResponse], error) { opentracing.SpanFromContext(ctx) tenantID, err := tenant.TenantID(ctx) diff --git a/pkg/frontend/frontend_diff.go b/pkg/frontend/frontend_diff.go index 1390770776..09b42f3d64 100644 --- a/pkg/frontend/frontend_diff.go +++ b/pkg/frontend/frontend_diff.go @@ -14,10 +14,10 @@ import ( "github.com/grafana/pyroscope/pkg/validation" ) -func (f *Frontend) Diff(ctx context.Context, - c *connect.Request[querierv1.DiffRequest]) ( - *connect.Response[querierv1.DiffResponse], error, -) { +func (f *Frontend) Diff( + ctx 
context.Context, + c *connect.Request[querierv1.DiffRequest], +) (*connect.Response[querierv1.DiffResponse], error) { ctx = connectgrpc.WithProcedure(ctx, querierv1connect.QuerierServiceDiffProcedure) g, ctx := errgroup.WithContext(ctx) tenantIDs, err := tenant.TenantIDs(ctx) diff --git a/pkg/frontend/frontend_get_profile_stats.go b/pkg/frontend/frontend_get_profile_stats.go index 0720379804..c7e8c923b1 100644 --- a/pkg/frontend/frontend_get_profile_stats.go +++ b/pkg/frontend/frontend_get_profile_stats.go @@ -11,10 +11,10 @@ import ( "github.com/grafana/pyroscope/pkg/util/connectgrpc" ) -func (f *Frontend) GetProfileStats(ctx context.Context, - c *connect.Request[typesv1.GetProfileStatsRequest]) ( - *connect.Response[typesv1.GetProfileStatsResponse], error, -) { +func (f *Frontend) GetProfileStats( + ctx context.Context, + c *connect.Request[typesv1.GetProfileStatsRequest], +) (*connect.Response[typesv1.GetProfileStatsResponse], error) { opentracing.SpanFromContext(ctx) ctx = connectgrpc.WithProcedure(ctx, querierv1connect.QuerierServiceGetProfileStatsProcedure) diff --git a/pkg/frontend/frontend_label_names.go b/pkg/frontend/frontend_label_names.go index 4eda786cc6..ee836779f9 100644 --- a/pkg/frontend/frontend_label_names.go +++ b/pkg/frontend/frontend_label_names.go @@ -15,7 +15,10 @@ import ( "github.com/grafana/pyroscope/pkg/validation" ) -func (f *Frontend) LabelNames(ctx context.Context, c *connect.Request[typesv1.LabelNamesRequest]) (*connect.Response[typesv1.LabelNamesResponse], error) { +func (f *Frontend) LabelNames( + ctx context.Context, + c *connect.Request[typesv1.LabelNamesRequest], +) (*connect.Response[typesv1.LabelNamesResponse], error) { opentracing.SpanFromContext(ctx). SetTag("start", model.Time(c.Msg.Start).Time().String()). SetTag("end", model.Time(c.Msg.End).Time().String()). diff --git a/pkg/frontend/frontend_label_values.go b/pkg/frontend/frontend_label_values.go index 1378958441..21bf76e785 100644 --- a/pkg/frontend/frontend_label_values.go +++ b/pkg/frontend/frontend_label_values.go @@ -15,7 +15,10 @@ import ( "github.com/grafana/pyroscope/pkg/validation" ) -func (f *Frontend) LabelValues(ctx context.Context, c *connect.Request[typesv1.LabelValuesRequest]) (*connect.Response[typesv1.LabelValuesResponse], error) { +func (f *Frontend) LabelValues( + ctx context.Context, + c *connect.Request[typesv1.LabelValuesRequest], +) (*connect.Response[typesv1.LabelValuesResponse], error) { opentracing.SpanFromContext(ctx). SetTag("start", model.Time(c.Msg.Start).Time().String()). SetTag("end", model.Time(c.Msg.End).Time().String()). diff --git a/pkg/frontend/frontend_profile_types.go b/pkg/frontend/frontend_profile_types.go index 752d7fae21..468144cd05 100644 --- a/pkg/frontend/frontend_profile_types.go +++ b/pkg/frontend/frontend_profile_types.go @@ -15,7 +15,10 @@ import ( "github.com/grafana/pyroscope/pkg/validation" ) -func (f *Frontend) ProfileTypes(ctx context.Context, c *connect.Request[querierv1.ProfileTypesRequest]) (*connect.Response[querierv1.ProfileTypesResponse], error) { +func (f *Frontend) ProfileTypes( + ctx context.Context, + c *connect.Request[querierv1.ProfileTypesRequest], +) (*connect.Response[querierv1.ProfileTypesResponse], error) { opentracing.SpanFromContext(ctx). SetTag("start", model.Time(c.Msg.Start).Time().String()). 
SetTag("end", model.Time(c.Msg.End).Time().String()) diff --git a/pkg/frontend/frontend_select_merge_profile.go b/pkg/frontend/frontend_select_merge_profile.go index 7d331d2d81..e256ed6402 100644 --- a/pkg/frontend/frontend_select_merge_profile.go +++ b/pkg/frontend/frontend_select_merge_profile.go @@ -20,7 +20,10 @@ import ( "github.com/grafana/pyroscope/pkg/validation" ) -func (f *Frontend) SelectMergeProfile(ctx context.Context, c *connect.Request[querierv1.SelectMergeProfileRequest]) (*connect.Response[profilev1.Profile], error) { +func (f *Frontend) SelectMergeProfile( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeProfileRequest], +) (*connect.Response[profilev1.Profile], error) { opentracing.SpanFromContext(ctx). SetTag("start", model.Time(c.Msg.Start).Time().String()). SetTag("end", model.Time(c.Msg.End).Time().String()). diff --git a/pkg/frontend/frontend_select_merge_span_profile.go b/pkg/frontend/frontend_select_merge_span_profile.go index 85ac11ffc6..d8d5596cb5 100644 --- a/pkg/frontend/frontend_select_merge_span_profile.go +++ b/pkg/frontend/frontend_select_merge_span_profile.go @@ -18,10 +18,10 @@ import ( "github.com/grafana/pyroscope/pkg/validation" ) -func (f *Frontend) SelectMergeSpanProfile(ctx context.Context, - c *connect.Request[querierv1.SelectMergeSpanProfileRequest]) ( - *connect.Response[querierv1.SelectMergeSpanProfileResponse], error, -) { +func (f *Frontend) SelectMergeSpanProfile( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeSpanProfileRequest], +) (*connect.Response[querierv1.SelectMergeSpanProfileResponse], error) { opentracing.SpanFromContext(ctx). SetTag("start", model.Time(c.Msg.Start).Time().String()). SetTag("end", model.Time(c.Msg.End).Time().String()). diff --git a/pkg/frontend/frontend_select_merge_stacktraces.go b/pkg/frontend/frontend_select_merge_stacktraces.go index 1811318e04..f5025763f7 100644 --- a/pkg/frontend/frontend_select_merge_stacktraces.go +++ b/pkg/frontend/frontend_select_merge_stacktraces.go @@ -18,10 +18,10 @@ import ( "github.com/grafana/pyroscope/pkg/validation" ) -func (f *Frontend) SelectMergeStacktraces(ctx context.Context, - c *connect.Request[querierv1.SelectMergeStacktracesRequest]) ( - *connect.Response[querierv1.SelectMergeStacktracesResponse], error, -) { +func (f *Frontend) SelectMergeStacktraces( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeStacktracesRequest], +) (*connect.Response[querierv1.SelectMergeStacktracesResponse], error) { t, err := f.selectMergeStacktracesTree(ctx, c) if err != nil { return nil, err @@ -36,10 +36,10 @@ func (f *Frontend) SelectMergeStacktraces(ctx context.Context, return connect.NewResponse(&resp), nil } -func (f *Frontend) selectMergeStacktracesTree(ctx context.Context, - c *connect.Request[querierv1.SelectMergeStacktracesRequest]) ( - *phlaremodel.Tree, error, -) { +func (f *Frontend) selectMergeStacktracesTree( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeStacktracesRequest], +) (*phlaremodel.Tree, error) { opentracing.SpanFromContext(ctx). SetTag("start", model.Time(c.Msg.Start).Time().String()). SetTag("end", model.Time(c.Msg.End).Time().String()). 
diff --git a/pkg/frontend/frontend_select_time_series.go b/pkg/frontend/frontend_select_time_series.go index 0c1b4ea9b3..fece898099 100644 --- a/pkg/frontend/frontend_select_time_series.go +++ b/pkg/frontend/frontend_select_time_series.go @@ -18,10 +18,10 @@ import ( "github.com/grafana/pyroscope/pkg/validation" ) -func (f *Frontend) SelectSeries(ctx context.Context, - c *connect.Request[querierv1.SelectSeriesRequest]) ( - *connect.Response[querierv1.SelectSeriesResponse], error, -) { +func (f *Frontend) SelectSeries( + ctx context.Context, + c *connect.Request[querierv1.SelectSeriesRequest], +) (*connect.Response[querierv1.SelectSeriesResponse], error) { opentracing.SpanFromContext(ctx). SetTag("start", model.Time(c.Msg.Start).Time().String()). SetTag("end", model.Time(c.Msg.End).Time().String()). @@ -51,7 +51,7 @@ func (f *Frontend) SelectSeries(ctx context.Context, g.SetLimit(maxConcurrent) } - m := phlaremodel.NewTimeSeriesMerger(false) + m := phlaremodel.NewTimeSeriesMerger(true) interval := validationutil.MaxDurationOrZeroPerTenant(tenantIDs, f.limits.QuerySplitDuration) intervals := NewTimeIntervalIterator(time.UnixMilli(c.Msg.Start), time.UnixMilli(c.Msg.End), interval, WithAlignment(time.Second*time.Duration(c.Msg.Step))) diff --git a/pkg/frontend/frontend_series_labels.go b/pkg/frontend/frontend_series_labels.go index e38854445f..15adb53fd8 100644 --- a/pkg/frontend/frontend_series_labels.go +++ b/pkg/frontend/frontend_series_labels.go @@ -15,7 +15,10 @@ import ( "github.com/grafana/pyroscope/pkg/validation" ) -func (f *Frontend) Series(ctx context.Context, c *connect.Request[querierv1.SeriesRequest]) (*connect.Response[querierv1.SeriesResponse], error) { +func (f *Frontend) Series( + ctx context.Context, + c *connect.Request[querierv1.SeriesRequest], +) (*connect.Response[querierv1.SeriesResponse], error) { opentracing.SpanFromContext(ctx). SetTag("start", model.Time(c.Msg.Start).Time().String()). SetTag("end", model.Time(c.Msg.End).Time().String()). 
diff --git a/pkg/frontend/frontend_vcs.go b/pkg/frontend/frontend_vcs.go index a642c74c9f..8886043a4f 100644 --- a/pkg/frontend/frontend_vcs.go +++ b/pkg/frontend/frontend_vcs.go @@ -9,23 +9,38 @@ import ( "github.com/grafana/pyroscope/pkg/util/connectgrpc" ) -func (f *Frontend) GithubApp(ctx context.Context, req *connect.Request[vcsv1.GithubAppRequest]) (*connect.Response[vcsv1.GithubAppResponse], error) { +func (f *Frontend) GithubApp( + ctx context.Context, + req *connect.Request[vcsv1.GithubAppRequest], +) (*connect.Response[vcsv1.GithubAppResponse], error) { return connectgrpc.RoundTripUnary[vcsv1.GithubAppRequest, vcsv1.GithubAppResponse](ctx, f, req) } -func (f *Frontend) GithubLogin(ctx context.Context, req *connect.Request[vcsv1.GithubLoginRequest]) (*connect.Response[vcsv1.GithubLoginResponse], error) { +func (f *Frontend) GithubLogin( + ctx context.Context, + req *connect.Request[vcsv1.GithubLoginRequest], +) (*connect.Response[vcsv1.GithubLoginResponse], error) { return connectgrpc.RoundTripUnary[vcsv1.GithubLoginRequest, vcsv1.GithubLoginResponse](ctx, f, req) } -func (f *Frontend) GithubRefresh(ctx context.Context, req *connect.Request[vcsv1.GithubRefreshRequest]) (*connect.Response[vcsv1.GithubRefreshResponse], error) { +func (f *Frontend) GithubRefresh( + ctx context.Context, + req *connect.Request[vcsv1.GithubRefreshRequest], +) (*connect.Response[vcsv1.GithubRefreshResponse], error) { return connectgrpc.RoundTripUnary[vcsv1.GithubRefreshRequest, vcsv1.GithubRefreshResponse](ctx, f, req) } -func (f *Frontend) GetFile(ctx context.Context, req *connect.Request[vcsv1.GetFileRequest]) (*connect.Response[vcsv1.GetFileResponse], error) { +func (f *Frontend) GetFile( + ctx context.Context, + req *connect.Request[vcsv1.GetFileRequest], +) (*connect.Response[vcsv1.GetFileResponse], error) { return connectgrpc.RoundTripUnary[vcsv1.GetFileRequest, vcsv1.GetFileResponse](ctx, f, req) } -func (f *Frontend) GetCommit(ctx context.Context, req *connect.Request[vcsv1.GetCommitRequest]) (*connect.Response[vcsv1.GetCommitResponse], error) { +func (f *Frontend) GetCommit( + ctx context.Context, + req *connect.Request[vcsv1.GetCommitRequest], +) (*connect.Response[vcsv1.GetCommitResponse], error) { return connectgrpc.RoundTripUnary[vcsv1.GetCommitRequest, vcsv1.GetCommitResponse](ctx, f, req) } diff --git a/pkg/frontend/read_path/query_frontend/compat.go b/pkg/frontend/read_path/query_frontend/compat.go new file mode 100644 index 0000000000..4277e36d55 --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/compat.go @@ -0,0 +1,264 @@ +package query_frontend + +import ( + "context" + "fmt" + "slices" + "sort" + "strings" + + "connectrpc.com/connect" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + metastoreclient "github.com/grafana/pyroscope/pkg/experiment/metastore/client" + phlaremodel "github.com/grafana/pyroscope/pkg/model" +) + +// TODO(kolesnikovae): Extend the metastore API to query arbitrary dataset labels. 
+ +var profileTypeLabels2 = []string{ + "__profile_type__", + "service_name", +} + +var profileTypeLabels5 = []string{ + "__name__", + "__profile_type__", + "__type__", + "pyroscope_app", + "service_name", +} + +func isProfileTypeQuery(labels, matchers []string) bool { + if len(matchers) > 0 { + return false + } + var s []string + switch len(labels) { + case 2: + s = profileTypeLabels2 + case 5: + s = profileTypeLabels5 + default: + return false + } + sort.Strings(labels) + return slices.Compare(s, labels) == 0 +} + +func listProfileTypesFromMetadataAsSeriesLabels( + ctx context.Context, + client *metastoreclient.Client, + tenants []string, + startTime int64, + endTime int64, + labels []string, +) (*connect.Response[querierv1.SeriesResponse], error) { + resp, err := listProfileTypesFromMetadata(ctx, client, tenants, startTime, endTime) + if err != nil { + return nil, err + } + return connect.NewResponse(&querierv1.SeriesResponse{ + LabelsSet: resp.buildSeriesLabels(labels), + }), nil +} + +func listProfileTypesFromMetadata( + ctx context.Context, + client *metastoreclient.Client, + tenants []string, + startTime int64, + endTime int64, +) (*ptypes, error) { + md, err := client.QueryMetadata(ctx, &metastorev1.QueryMetadataRequest{ + TenantId: tenants, + StartTime: startTime, + EndTime: endTime, + Query: "{}", + }) + if err != nil { + return nil, err + } + p := newProfileTypesResponseBuilder(len(md.Blocks) * 8) + for _, m := range md.Blocks { + for _, s := range m.Datasets { + p.addServiceProfileTypes(s.Name, s.ProfileTypes...) + } + } + return p, nil +} + +func buildLabelSelectorFromMatchers(matchers []string) (string, error) { + parsed, err := parseMatchers(matchers) + if err != nil { + return "", fmt.Errorf("parsing label selector: %w", err) + } + return matchersToLabelSelector(parsed), nil +} + +func buildLabelSelectorWithProfileType(labelSelector, profileTypeID string) (string, error) { + matchers, err := parser.ParseMetricSelector(labelSelector) + if err != nil { + return "", fmt.Errorf("parsing label selector %q: %w", labelSelector, err) + } + profileType, err := phlaremodel.ParseProfileTypeSelector(profileTypeID) + if err != nil { + return "", fmt.Errorf("parsing profile type ID %q: %w", profileTypeID, err) + } + matchers = append(matchers, phlaremodel.SelectorFromProfileType(profileType)) + return matchersToLabelSelector(matchers), nil +} + +func parseMatchers(matchers []string) ([]*labels.Matcher, error) { + parsed := make([]*labels.Matcher, 0, len(matchers)) + for _, m := range matchers { + s, err := parser.ParseMetricSelector(m) + if err != nil { + return nil, fmt.Errorf("failed to parse label selector %q: %w", s, err) + } + parsed = append(parsed, s...) 
+ } + return parsed, nil +} + +func matchersToLabelSelector(matchers []*labels.Matcher) string { + var q strings.Builder + q.WriteByte('{') + for i, m := range matchers { + if i > 0 { + q.WriteByte(',') + } + q.WriteString(m.Name) + q.WriteString(m.Type.String()) + q.WriteByte('"') + q.WriteString(m.Value) + q.WriteByte('"') + } + q.WriteByte('}') + return q.String() +} + +func findReport(r queryv1.ReportType, reports []*queryv1.Report) *queryv1.Report { + for _, x := range reports { + if x.ReportType == r { + return x + } + } + return nil +} + +type ptypes struct { + services map[string]map[string]struct{} +} + +func newProfileTypesResponseBuilder(size int) *ptypes { + return &ptypes{services: make(map[string]map[string]struct{}, size)} +} + +func (p *ptypes) addServiceProfileTypes(s string, types ...string) { + sp, ok := p.services[s] + if !ok { + sp = make(map[string]struct{}, len(types)) + p.services[s] = sp + } + for _, t := range types { + sp[t] = struct{}{} + } +} + +func (p *ptypes) buildSeriesLabels(names []string) (labels []*typesv1.Labels) { + switch len(names) { + case 2: + labels = p.buildSeriesLabels2() + case 5: + labels = p.buildSeriesLabels5() + default: + panic("bug: invalid request: expected 2 or 5 label names") + } + slices.SortFunc(labels, func(a, b *typesv1.Labels) int { + return phlaremodel.CompareLabelPairs(a.Labels, b.Labels) + }) + return labels +} + +func (p *ptypes) buildSeriesLabels2() []*typesv1.Labels { + labels := make([]*typesv1.Labels, 0, len(p.services)*4) + for n, types := range p.services { + for t := range types { + labels = append(labels, &typesv1.Labels{ + Labels: []*typesv1.LabelPair{ + {Name: "__profile_type__", Value: t}, + {Name: "service_name", Value: n}, + }, + }) + } + } + return labels +} + +func (p *ptypes) buildSeriesLabels5() []*typesv1.Labels { + labels := make([]*typesv1.Labels, 0, len(p.services)*4) + for n, types := range p.services { + for t := range types { + pt, err := phlaremodel.ParseProfileTypeSelector(t) + if err != nil { + panic("bug: invalid profile type: " + err.Error()) + } + labels = append(labels, &typesv1.Labels{ + Labels: []*typesv1.LabelPair{ + {Name: "__profile_type__", Value: t}, + {Name: "service_name", Value: n}, + {Name: "__name__", Value: pt.Name}, + {Name: "__type__", Value: pt.SampleType}, + }, + }) + } + } + return labels +} + +//nolint:unused +func printStats(logger log.Logger, blocks []*metastorev1.BlockMeta) { + type blockMetaStats struct { + level uint32 + minTime int64 + maxTime int64 + size uint64 + count int + } + m := make(map[uint32]*blockMetaStats) + for _, b := range blocks { + s, ok := m[b.CompactionLevel] + if !ok { + s = &blockMetaStats{level: b.CompactionLevel} + m[b.CompactionLevel] = s + } + for _, x := range b.Datasets { + s.size += x.Size + } + s.count++ + } + sorted := make([]*blockMetaStats, 0, len(m)) + for _, s := range m { + sorted = append(sorted, s) + } + slices.SortFunc(sorted, func(a, b *blockMetaStats) int { + return int(a.level - b.level) + }) + fields := make([]interface{}, 0, 4+len(sorted)*2) + fields = append(fields, "msg", "block metadata list", "blocks_total", fmt.Sprint(len(blocks))) + for _, s := range sorted { + fields = append(fields, + fmt.Sprintf("l%d_blocks", s.level), fmt.Sprint(s.count), + fmt.Sprintf("l%d_size", s.level), fmt.Sprint(s.size), + ) + } + _ = level.Info(logger).Log(fields...) 
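Aside: a standalone sketch of the selector these helpers are meant to hand to the query backend: the user's matchers plus one matcher pinning the profile type. It assumes, as the rest of this package does, that the profile type ID travels verbatim in the __profile_type__ label; the input values are made up for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	userSelector := `{service_name="ride-sharing-app"}`
	profileTypeID := "process_cpu:cpu:nanoseconds:cpu:nanoseconds"

	// Parse the user's selector into matchers, as parseMatchers does.
	matchers, err := parser.ParseMetricSelector(userSelector)
	if err != nil {
		panic(err)
	}
	// Pin the profile type with one extra equality matcher.
	matchers = append(matchers, labels.MustNewMatcher(
		labels.MatchEqual, "__profile_type__", profileTypeID))

	// Serialize back into a single selector string, mirroring what
	// matchersToLabelSelector produces.
	parts := make([]string, 0, len(matchers))
	for _, m := range matchers {
		parts = append(parts, fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value))
	}
	fmt.Println("{" + strings.Join(parts, ",") + "}")
	// {service_name="ride-sharing-app",__profile_type__="process_cpu:cpu:nanoseconds:cpu:nanoseconds"}
}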
+} diff --git a/pkg/frontend/read_path/query_frontend/query_diff.go b/pkg/frontend/read_path/query_frontend/query_diff.go new file mode 100644 index 0000000000..a3097898cd --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/query_diff.go @@ -0,0 +1,56 @@ +package query_frontend + +import ( + "context" + + "connectrpc.com/connect" + "github.com/grafana/dskit/tenant" + "golang.org/x/sync/errgroup" + + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/validation" +) + +func (q *QueryFrontend) Diff( + ctx context.Context, + c *connect.Request[querierv1.DiffRequest], +) (*connect.Response[querierv1.DiffResponse], error) { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + + maxNodes := c.Msg.Left.GetMaxNodes() + if n := c.Msg.Right.GetMaxNodes(); n > maxNodes { + maxNodes = n + } + maxNodes, err = validation.ValidateMaxNodes(q.limits, tenantIDs, maxNodes) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + c.Msg.Left.MaxNodes = &maxNodes + c.Msg.Right.MaxNodes = &maxNodes + + var left, right []byte + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { + var leftErr error + left, leftErr = q.selectMergeStacktracesTree(ctx, connect.NewRequest(c.Msg.Left)) + return leftErr + }) + g.Go(func() error { + var rightErr error + right, rightErr = q.selectMergeStacktracesTree(ctx, connect.NewRequest(c.Msg.Right)) + return rightErr + }) + if err = g.Wait(); err != nil { + return nil, err + } + + diff, err := phlaremodel.NewFlamegraphDiffFromBytes(left, right, maxNodes) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + return connect.NewResponse(&querierv1.DiffResponse{Flamegraph: diff}), nil +} diff --git a/pkg/frontend/read_path/query_frontend/query_frontend.go b/pkg/frontend/read_path/query_frontend/query_frontend.go new file mode 100644 index 0000000000..191ff0ded0 --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/query_frontend.go @@ -0,0 +1,111 @@ +package query_frontend + +import ( + "context" + "math/rand" + + "connectrpc.com/connect" + "github.com/go-kit/log" + "github.com/grafana/dskit/tenant" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1/querierv1connect" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + metastoreclient "github.com/grafana/pyroscope/pkg/experiment/metastore/client" + querybackend "github.com/grafana/pyroscope/pkg/experiment/query_backend" + querybackendclient "github.com/grafana/pyroscope/pkg/experiment/query_backend/client" + queryplan "github.com/grafana/pyroscope/pkg/experiment/query_backend/query_plan" + "github.com/grafana/pyroscope/pkg/frontend" +) + +var _ querierv1connect.QuerierServiceClient = (*QueryFrontend)(nil) + +type QueryFrontend struct { + logger log.Logger + limits frontend.Limits + metastore *metastoreclient.Client + querybackend *querybackendclient.Client +} + +func NewQueryFrontend( + logger log.Logger, + limits frontend.Limits, + metastore *metastoreclient.Client, + querybackend *querybackendclient.Client, +) *QueryFrontend { + return &QueryFrontend{ + logger: logger, + limits: limits, + metastore: metastore, + querybackend: querybackend, + } +} + +var xrand = rand.New(rand.NewSource(4349676827832284783)) + +func (q *QueryFrontend) Query( + ctx context.Context, + req 
*queryv1.QueryRequest, +) (*queryv1.QueryResponse, error) { + // TODO(kolesnikovae): + // This method is supposed to be the entry point of the read path + // in the future versions. Therefore, validation, overrides, and + // rest of the request handling should be moved here. + tenants, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + md, err := q.metastore.QueryMetadata(ctx, &metastorev1.QueryMetadataRequest{ + TenantId: tenants, + StartTime: req.StartTime, + EndTime: req.EndTime, + Query: req.LabelSelector, + }) + if err != nil { + return nil, err + } + if len(md.Blocks) == 0 { + return new(queryv1.QueryResponse), nil + } + + // TODO(kolesnikovae): Implement query planning. + // Randomize the order of blocks to avoid hotspots. + xrand.Shuffle(len(md.Blocks), func(i, j int) { + md.Blocks[i], md.Blocks[j] = md.Blocks[j], md.Blocks[i] + }) + p := queryplan.Build(md.Blocks, 4, 20) + + resp, err := q.querybackend.Invoke(ctx, &queryv1.InvokeRequest{ + Tenant: tenants, + StartTime: req.StartTime, + EndTime: req.EndTime, + LabelSelector: req.LabelSelector, + Options: &queryv1.InvokeOptions{}, + QueryPlan: p.Proto(), + Query: req.Query, + }) + if err != nil { + return nil, err + } + // TODO(kolesnikovae): Diagnostics. + return &queryv1.QueryResponse{Reports: resp.Reports}, nil +} + +// querySingle is a helper method that expects a single report +// of the appropriate type in the response; this method should +// be used to implement adapter to the old query API. +func (q *QueryFrontend) querySingle( + ctx context.Context, + req *queryv1.QueryRequest, +) (*queryv1.Report, error) { + if len(req.Query) != 1 { + // Nil report is a valid response. + return nil, nil + } + t := querybackend.QueryReportType(req.Query[0].QueryType) + resp, err := q.Query(ctx, req) + if err != nil { + return nil, err + } + return findReport(t, resp.Reports), nil +} diff --git a/pkg/frontend/read_path/query_frontend/query_label_names.go b/pkg/frontend/read_path/query_frontend/query_label_names.go new file mode 100644 index 0000000000..8a3ac00f1d --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/query_label_names.go @@ -0,0 +1,57 @@ +package query_frontend + +import ( + "context" + + "connectrpc.com/connect" + "github.com/grafana/dskit/tenant" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/common/model" + + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + "github.com/grafana/pyroscope/pkg/validation" +) + +func (q *QueryFrontend) LabelNames( + ctx context.Context, + c *connect.Request[typesv1.LabelNamesRequest], +) (*connect.Response[typesv1.LabelNamesResponse], error) { + opentracing.SpanFromContext(ctx). + SetTag("start", model.Time(c.Msg.Start).Time().String()). + SetTag("end", model.Time(c.Msg.End).Time().String()). 
+ SetTag("matchers", c.Msg.Matchers) + + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + empty, err := validation.SanitizeTimeRange(q.limits, tenantIDs, &c.Msg.Start, &c.Msg.End) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + if empty { + return connect.NewResponse(&typesv1.LabelNamesResponse{}), nil + } + + labelSelector, err := buildLabelSelectorFromMatchers(c.Msg.Matchers) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + report, err := q.querySingle(ctx, &queryv1.QueryRequest{ + StartTime: c.Msg.Start, + EndTime: c.Msg.End, + LabelSelector: labelSelector, + Query: []*queryv1.Query{{ + QueryType: queryv1.QueryType_QUERY_LABEL_NAMES, + LabelNames: &queryv1.LabelNamesQuery{}, + }}, + }) + if err != nil { + return nil, err + } + if report == nil { + return connect.NewResponse(&typesv1.LabelNamesResponse{}), nil + } + return connect.NewResponse(&typesv1.LabelNamesResponse{Names: report.LabelNames.LabelNames}), nil +} diff --git a/pkg/frontend/read_path/query_frontend/query_label_values.go b/pkg/frontend/read_path/query_frontend/query_label_values.go new file mode 100644 index 0000000000..948787c50b --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/query_label_values.go @@ -0,0 +1,58 @@ +package query_frontend + +import ( + "context" + + "connectrpc.com/connect" + "github.com/grafana/dskit/tenant" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/common/model" + + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + "github.com/grafana/pyroscope/pkg/validation" +) + +func (q *QueryFrontend) LabelValues( + ctx context.Context, + c *connect.Request[typesv1.LabelValuesRequest], +) (*connect.Response[typesv1.LabelValuesResponse], error) { + opentracing.SpanFromContext(ctx). + SetTag("start", model.Time(c.Msg.Start).Time().String()). + SetTag("end", model.Time(c.Msg.End).Time().String()). + SetTag("matchers", c.Msg.Matchers). 
+ SetTag("name", c.Msg.Name) + + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + empty, err := validation.SanitizeTimeRange(q.limits, tenantIDs, &c.Msg.Start, &c.Msg.End) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + if empty { + return connect.NewResponse(&typesv1.LabelValuesResponse{}), nil + } + + labelSelector, err := buildLabelSelectorFromMatchers(c.Msg.Matchers) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + report, err := q.querySingle(ctx, &queryv1.QueryRequest{ + StartTime: c.Msg.Start, + EndTime: c.Msg.End, + LabelSelector: labelSelector, + Query: []*queryv1.Query{{ + QueryType: queryv1.QueryType_QUERY_LABEL_VALUES, + LabelValues: &queryv1.LabelValuesQuery{LabelName: c.Msg.Name}, + }}, + }) + if err != nil { + return nil, err + } + if report == nil { + return connect.NewResponse(&typesv1.LabelValuesResponse{}), nil + } + return connect.NewResponse(&typesv1.LabelValuesResponse{Names: report.LabelValues.LabelValues}), nil +} diff --git a/pkg/frontend/read_path/query_frontend/query_select_merge_profile.go b/pkg/frontend/read_path/query_frontend/query_select_merge_profile.go new file mode 100644 index 0000000000..6655993478 --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/query_select_merge_profile.go @@ -0,0 +1,69 @@ +package query_frontend + +import ( + "context" + + "connectrpc.com/connect" + "github.com/grafana/dskit/tenant" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/common/model" + + profilev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1" + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + "github.com/grafana/pyroscope/pkg/pprof" + "github.com/grafana/pyroscope/pkg/validation" +) + +func (q *QueryFrontend) SelectMergeProfile( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeProfileRequest], +) (*connect.Response[profilev1.Profile], error) { + opentracing.SpanFromContext(ctx). + SetTag("start", model.Time(c.Msg.Start).Time().String()). + SetTag("end", model.Time(c.Msg.End).Time().String()). + SetTag("selector", c.Msg.LabelSelector). + SetTag("max_nodes", c.Msg.GetMaxNodes()). + SetTag("profile_type", c.Msg.ProfileTypeID) + + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + empty, err := validation.SanitizeTimeRange(q.limits, tenantIDs, &c.Msg.Start, &c.Msg.End) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + if empty { + return connect.NewResponse(&profilev1.Profile{}), nil + } + + // NOTE: Max nodes limit is not set by default: + // the method is used for pprof export and + // truncation might not be desirable. 
+ + labelSelector, err := buildLabelSelectorWithProfileType(c.Msg.LabelSelector, c.Msg.ProfileTypeID) + if err != nil { + return nil, err + } + report, err := q.querySingle(ctx, &queryv1.QueryRequest{ + StartTime: c.Msg.Start, + EndTime: c.Msg.End, + LabelSelector: labelSelector, + Query: []*queryv1.Query{{ + QueryType: queryv1.QueryType_QUERY_PPROF, + Pprof: &queryv1.PprofQuery{MaxNodes: c.Msg.GetMaxNodes()}, + }}, + }) + if err != nil { + return nil, err + } + if report == nil { + return nil, nil + } + var p profilev1.Profile + if err = pprof.Unmarshal(report.Pprof.Pprof, &p); err != nil { + return nil, err + } + return connect.NewResponse(&p), nil +} diff --git a/pkg/frontend/read_path/query_frontend/query_select_merge_span_profile.go b/pkg/frontend/read_path/query_frontend/query_select_merge_span_profile.go new file mode 100644 index 0000000000..6c7f07e74f --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/query_select_merge_span_profile.go @@ -0,0 +1,78 @@ +package query_frontend + +import ( + "context" + + "connectrpc.com/connect" + "github.com/grafana/dskit/tenant" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/common/model" + + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/validation" +) + +// TODO(kolesnikovae): Implement span selector. + +func (q *QueryFrontend) SelectMergeSpanProfile( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeSpanProfileRequest], +) (*connect.Response[querierv1.SelectMergeSpanProfileResponse], error) { + opentracing.SpanFromContext(ctx). + SetTag("start", model.Time(c.Msg.Start).Time().String()). + SetTag("end", model.Time(c.Msg.End).Time().String()). + SetTag("selector", c.Msg.LabelSelector). + SetTag("max_nodes", c.Msg.GetMaxNodes()). 
+ SetTag("profile_type", c.Msg.ProfileTypeID) + + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + empty, err := validation.SanitizeTimeRange(q.limits, tenantIDs, &c.Msg.Start, &c.Msg.End) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + if empty { + return connect.NewResponse(&querierv1.SelectMergeSpanProfileResponse{}), nil + } + + maxNodes, err := validation.ValidateMaxNodes(q.limits, tenantIDs, c.Msg.GetMaxNodes()) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + labelSelector, err := buildLabelSelectorWithProfileType(c.Msg.LabelSelector, c.Msg.ProfileTypeID) + if err != nil { + return nil, err + } + report, err := q.querySingle(ctx, &queryv1.QueryRequest{ + StartTime: c.Msg.Start, + EndTime: c.Msg.End, + LabelSelector: labelSelector, + Query: []*queryv1.Query{{ + QueryType: queryv1.QueryType_QUERY_TREE, + Tree: &queryv1.TreeQuery{MaxNodes: maxNodes}, + }}, + }) + if err != nil { + return nil, err + } + if report == nil { + return connect.NewResponse(&querierv1.SelectMergeSpanProfileResponse{}), nil + } + + var resp querierv1.SelectMergeSpanProfileResponse + switch c.Msg.Format { + case querierv1.ProfileFormat_PROFILE_FORMAT_TREE: + resp.Tree = report.Tree.Tree + default: + t, err := phlaremodel.UnmarshalTree(report.Tree.Tree) + if err != nil { + return nil, err + } + resp.Flamegraph = phlaremodel.NewFlameGraph(t, c.Msg.GetMaxNodes()) + } + return connect.NewResponse(&resp), nil +} diff --git a/pkg/frontend/read_path/query_frontend/query_select_merge_stacktraces.go b/pkg/frontend/read_path/query_frontend/query_select_merge_stacktraces.go new file mode 100644 index 0000000000..45a30e7eb4 --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/query_select_merge_stacktraces.go @@ -0,0 +1,86 @@ +package query_frontend + +import ( + "context" + + "connectrpc.com/connect" + "github.com/grafana/dskit/tenant" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/common/model" + + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/validation" +) + +func (q *QueryFrontend) SelectMergeStacktraces( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeStacktracesRequest], +) (*connect.Response[querierv1.SelectMergeStacktracesResponse], error) { + b, err := q.selectMergeStacktracesTree(ctx, c) + if err != nil { + return nil, err + } + var resp querierv1.SelectMergeStacktracesResponse + switch c.Msg.Format { + case querierv1.ProfileFormat_PROFILE_FORMAT_TREE: + resp.Tree = b + default: + t, err := phlaremodel.UnmarshalTree(b) + if err != nil { + return nil, err + } + resp.Flamegraph = phlaremodel.NewFlameGraph(t, c.Msg.GetMaxNodes()) + } + return connect.NewResponse(&resp), nil +} + +func (q *QueryFrontend) selectMergeStacktracesTree( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeStacktracesRequest], +) (tree []byte, err error) { + opentracing.SpanFromContext(ctx). + SetTag("start", model.Time(c.Msg.Start).Time().String()). + SetTag("end", model.Time(c.Msg.End).Time().String()). + SetTag("selector", c.Msg.LabelSelector). + SetTag("max_nodes", c.Msg.GetMaxNodes()). 
+ SetTag("profile_type", c.Msg.ProfileTypeID) + + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + empty, err := validation.SanitizeTimeRange(q.limits, tenantIDs, &c.Msg.Start, &c.Msg.End) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + if empty { + return nil, nil + } + + maxNodes, err := validation.ValidateMaxNodes(q.limits, tenantIDs, c.Msg.GetMaxNodes()) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + labelSelector, err := buildLabelSelectorWithProfileType(c.Msg.LabelSelector, c.Msg.ProfileTypeID) + if err != nil { + return nil, err + } + report, err := q.querySingle(ctx, &queryv1.QueryRequest{ + StartTime: c.Msg.Start, + EndTime: c.Msg.End, + LabelSelector: labelSelector, + Query: []*queryv1.Query{{ + QueryType: queryv1.QueryType_QUERY_TREE, + Tree: &queryv1.TreeQuery{MaxNodes: maxNodes}, + }}, + }) + if err != nil { + return nil, err + } + if report == nil { + return nil, nil + } + return report.Tree.Tree, nil +} diff --git a/pkg/frontend/read_path/query_frontend/query_select_time_series.go b/pkg/frontend/read_path/query_frontend/query_select_time_series.go new file mode 100644 index 0000000000..3e15496a54 --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/query_select_time_series.go @@ -0,0 +1,63 @@ +package query_frontend + +import ( + "context" + + "connectrpc.com/connect" + "github.com/grafana/dskit/tenant" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/common/model" + + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + "github.com/grafana/pyroscope/pkg/validation" +) + +func (q *QueryFrontend) SelectSeries( + ctx context.Context, + c *connect.Request[querierv1.SelectSeriesRequest], +) (*connect.Response[querierv1.SelectSeriesResponse], error) { + opentracing.SpanFromContext(ctx). + SetTag("start", model.Time(c.Msg.Start).Time().String()). + SetTag("end", model.Time(c.Msg.End).Time().String()). + SetTag("selector", c.Msg.LabelSelector). + SetTag("step", c.Msg.Step). + SetTag("by", c.Msg.GroupBy). 
+ SetTag("profile_type", c.Msg.ProfileTypeID) + + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + empty, err := validation.SanitizeTimeRange(q.limits, tenantIDs, &c.Msg.Start, &c.Msg.End) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + if empty { + return connect.NewResponse(&querierv1.SelectSeriesResponse{}), nil + } + + labelSelector, err := buildLabelSelectorWithProfileType(c.Msg.LabelSelector, c.Msg.ProfileTypeID) + if err != nil { + return nil, err + } + report, err := q.querySingle(ctx, &queryv1.QueryRequest{ + StartTime: c.Msg.Start, + EndTime: c.Msg.End, + LabelSelector: labelSelector, + Query: []*queryv1.Query{{ + QueryType: queryv1.QueryType_QUERY_TIME_SERIES, + TimeSeries: &queryv1.TimeSeriesQuery{ + Step: c.Msg.GetStep(), + GroupBy: c.Msg.GetGroupBy(), + }, + }}, + }) + if err != nil { + return nil, err + } + if report == nil { + return connect.NewResponse(&querierv1.SelectSeriesResponse{}), nil + } + return connect.NewResponse(&querierv1.SelectSeriesResponse{Series: report.TimeSeries.TimeSeries}), nil +} diff --git a/pkg/frontend/read_path/query_frontend/query_series_labels.go b/pkg/frontend/read_path/query_frontend/query_series_labels.go new file mode 100644 index 0000000000..a8ac271b44 --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/query_series_labels.go @@ -0,0 +1,66 @@ +package query_frontend + +import ( + "context" + + "connectrpc.com/connect" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/tenant" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/common/model" + + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + queryv1 "github.com/grafana/pyroscope/api/gen/proto/go/query/v1" + "github.com/grafana/pyroscope/pkg/validation" +) + +func (q *QueryFrontend) Series( + ctx context.Context, + c *connect.Request[querierv1.SeriesRequest], +) (*connect.Response[querierv1.SeriesResponse], error) { + opentracing.SpanFromContext(ctx). + SetTag("start", model.Time(c.Msg.Start).Time().String()). + SetTag("end", model.Time(c.Msg.End).Time().String()). + SetTag("matchers", c.Msg.Matchers). 
+ SetTag("label_names", c.Msg.LabelNames) + + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + empty, err := validation.SanitizeTimeRange(q.limits, tenantIDs, &c.Msg.Start, &c.Msg.End) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + if empty { + return connect.NewResponse(&querierv1.SeriesResponse{}), nil + } + + if isProfileTypeQuery(c.Msg.LabelNames, c.Msg.Matchers) { + _ = level.Debug(q.logger).Log("msg", "listing profile types from metadata as series labels") + return listProfileTypesFromMetadataAsSeriesLabels(ctx, q.metastore, tenantIDs, c.Msg.Start, c.Msg.End, c.Msg.LabelNames) + } + + labelSelector, err := buildLabelSelectorFromMatchers(c.Msg.Matchers) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + report, err := q.querySingle(ctx, &queryv1.QueryRequest{ + StartTime: c.Msg.Start, + EndTime: c.Msg.End, + LabelSelector: labelSelector, + Query: []*queryv1.Query{{ + QueryType: queryv1.QueryType_QUERY_SERIES_LABELS, + SeriesLabels: &queryv1.SeriesLabelsQuery{ + LabelNames: c.Msg.LabelNames, + }, + }}, + }) + if err != nil { + return nil, err + } + if report == nil { + return connect.NewResponse(&querierv1.SeriesResponse{}), nil + } + return connect.NewResponse(&querierv1.SeriesResponse{LabelsSet: report.SeriesLabels.SeriesLabels}), nil +} diff --git a/pkg/frontend/read_path/query_frontend/query_stubs.go b/pkg/frontend/read_path/query_frontend/query_stubs.go new file mode 100644 index 0000000000..e0831cc6df --- /dev/null +++ b/pkg/frontend/read_path/query_frontend/query_stubs.go @@ -0,0 +1,33 @@ +package query_frontend + +import ( + "context" + + "connectrpc.com/connect" + + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" +) + +// TODO(kolesnikovae): Decide whether we want to implement those. 
+ +func (q *QueryFrontend) AnalyzeQuery( + context.Context, + *connect.Request[querierv1.AnalyzeQueryRequest], +) (*connect.Response[querierv1.AnalyzeQueryResponse], error) { + return connect.NewResponse(&querierv1.AnalyzeQueryResponse{}), nil +} + +func (q *QueryFrontend) GetProfileStats( + context.Context, + *connect.Request[typesv1.GetProfileStatsRequest], +) (*connect.Response[typesv1.GetProfileStatsResponse], error) { + return connect.NewResponse(&typesv1.GetProfileStatsResponse{}), nil +} + +func (q *QueryFrontend) ProfileTypes( + context.Context, + *connect.Request[querierv1.ProfileTypesRequest], +) (*connect.Response[querierv1.ProfileTypesResponse], error) { + return connect.NewResponse(&querierv1.ProfileTypesResponse{}), nil +} diff --git a/pkg/frontend/read_path/query_service_handler.go b/pkg/frontend/read_path/query_service_handler.go new file mode 100644 index 0000000000..6a4fb9617f --- /dev/null +++ b/pkg/frontend/read_path/query_service_handler.go @@ -0,0 +1,212 @@ +package read_path + +import ( + "context" + + "connectrpc.com/connect" + "golang.org/x/sync/errgroup" + + profilev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1" + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1/querierv1connect" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + phlaremodel "github.com/grafana/pyroscope/pkg/model" + "github.com/grafana/pyroscope/pkg/pprof" +) + +var _ querierv1connect.QuerierServiceHandler = (*Router)(nil) + +func (r *Router) LabelValues( + ctx context.Context, + c *connect.Request[typesv1.LabelValuesRequest], +) (*connect.Response[typesv1.LabelValuesResponse], error) { + return Query[typesv1.LabelValuesRequest, typesv1.LabelValuesResponse](ctx, r, c, + func(a, b *typesv1.LabelValuesResponse) (*typesv1.LabelValuesResponse, error) { + m := phlaremodel.NewLabelMerger() + m.MergeLabelValues(a.Names) + m.MergeLabelValues(b.Names) + return &typesv1.LabelValuesResponse{Names: m.LabelValues()}, nil + }) +} + +func (r *Router) LabelNames( + ctx context.Context, + c *connect.Request[typesv1.LabelNamesRequest], +) (*connect.Response[typesv1.LabelNamesResponse], error) { + return Query[typesv1.LabelNamesRequest, typesv1.LabelNamesResponse](ctx, r, c, + func(a, b *typesv1.LabelNamesResponse) (*typesv1.LabelNamesResponse, error) { + m := phlaremodel.NewLabelMerger() + m.MergeLabelNames(a.Names) + m.MergeLabelNames(b.Names) + return &typesv1.LabelNamesResponse{Names: m.LabelNames()}, nil + }) +} + +func (r *Router) Series( + ctx context.Context, + c *connect.Request[querierv1.SeriesRequest], +) (*connect.Response[querierv1.SeriesResponse], error) { + return Query[querierv1.SeriesRequest, querierv1.SeriesResponse](ctx, r, c, + func(a, b *querierv1.SeriesResponse) (*querierv1.SeriesResponse, error) { + m := phlaremodel.NewLabelMerger() + m.MergeSeries(a.LabelsSet) + m.MergeSeries(b.LabelsSet) + return &querierv1.SeriesResponse{LabelsSet: m.SeriesLabels()}, nil + }) +} + +func (r *Router) SelectMergeStacktraces( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeStacktracesRequest], +) (*connect.Response[querierv1.SelectMergeStacktracesResponse], error) { + // We always query data in the tree format and + // return it in the format requested by the client. 
+ f := c.Msg.Format + c.Msg.Format = querierv1.ProfileFormat_PROFILE_FORMAT_TREE + resp, err := Query[querierv1.SelectMergeStacktracesRequest, querierv1.SelectMergeStacktracesResponse](ctx, r, c, + func(a, b *querierv1.SelectMergeStacktracesResponse) (*querierv1.SelectMergeStacktracesResponse, error) { + m := phlaremodel.NewTreeMerger() + if err := m.MergeTreeBytes(a.Tree); err != nil { + return nil, err + } + if err := m.MergeTreeBytes(b.Tree); err != nil { + return nil, err + } + tree := m.Tree().Bytes(c.Msg.GetMaxNodes()) + return &querierv1.SelectMergeStacktracesResponse{Tree: tree}, nil + }, + ) + if err == nil && f != c.Msg.Format { + resp.Msg.Flamegraph = phlaremodel.NewFlameGraph( + phlaremodel.MustUnmarshalTree(resp.Msg.Tree), + c.Msg.GetMaxNodes()) + } + return resp, err +} + +func (r *Router) SelectMergeSpanProfile( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeSpanProfileRequest], +) (*connect.Response[querierv1.SelectMergeSpanProfileResponse], error) { + // We always query data in the tree format and + // return it in the format requested by the client. + f := c.Msg.Format + c.Msg.Format = querierv1.ProfileFormat_PROFILE_FORMAT_TREE + resp, err := Query[querierv1.SelectMergeSpanProfileRequest, querierv1.SelectMergeSpanProfileResponse](ctx, r, c, + func(a, b *querierv1.SelectMergeSpanProfileResponse) (*querierv1.SelectMergeSpanProfileResponse, error) { + m := phlaremodel.NewTreeMerger() + if err := m.MergeTreeBytes(a.Tree); err != nil { + return nil, err + } + if err := m.MergeTreeBytes(b.Tree); err != nil { + return nil, err + } + tree := m.Tree().Bytes(c.Msg.GetMaxNodes()) + return &querierv1.SelectMergeSpanProfileResponse{Tree: tree}, nil + }, + ) + if err == nil && f != c.Msg.Format { + resp.Msg.Flamegraph = phlaremodel.NewFlameGraph( + phlaremodel.MustUnmarshalTree(resp.Msg.Tree), + c.Msg.GetMaxNodes()) + } + return resp, err +} + +func (r *Router) SelectMergeProfile( + ctx context.Context, + c *connect.Request[querierv1.SelectMergeProfileRequest], +) (*connect.Response[profilev1.Profile], error) { + return Query[querierv1.SelectMergeProfileRequest, profilev1.Profile](ctx, r, c, + func(a, b *profilev1.Profile) (*profilev1.Profile, error) { + var m pprof.ProfileMerge + if err := m.Merge(a); err != nil { + return nil, err + } + if err := m.Merge(b); err != nil { + return nil, err + } + return m.Profile(), nil + }) +} + +func (r *Router) SelectSeries( + ctx context.Context, + c *connect.Request[querierv1.SelectSeriesRequest], +) (*connect.Response[querierv1.SelectSeriesResponse], error) { + return Query[querierv1.SelectSeriesRequest, querierv1.SelectSeriesResponse](ctx, r, c, + func(a, b *querierv1.SelectSeriesResponse) (*querierv1.SelectSeriesResponse, error) { + m := phlaremodel.NewTimeSeriesMerger(true) + m.MergeTimeSeries(a.Series) + m.MergeTimeSeries(b.Series) + return &querierv1.SelectSeriesResponse{Series: m.TimeSeries()}, nil + }) +} + +func (r *Router) Diff( + ctx context.Context, + c *connect.Request[querierv1.DiffRequest], +) (*connect.Response[querierv1.DiffResponse], error) { + g, ctx := errgroup.WithContext(ctx) + getTree := func(dst *phlaremodel.Tree, req *querierv1.SelectMergeStacktracesRequest) func() error { + return func() error { + resp, err := r.SelectMergeStacktraces(ctx, connect.NewRequest(req)) + if err != nil { + return err + } + tree, err := phlaremodel.UnmarshalTree(resp.Msg.Tree) + if err != nil { + return err + } + *dst = *tree + return nil + } + } + + var left, right phlaremodel.Tree + g.Go(getTree(&left, c.Msg.Left)) + 
g.Go(getTree(&right, c.Msg.Right)) + if err := g.Wait(); err != nil { + return nil, err + } + + diff, err := phlaremodel.NewFlamegraphDiff(&left, &right, 0) + if err != nil { + return nil, err + } + + return connect.NewResponse(&querierv1.DiffResponse{Flamegraph: diff}), nil +} + +// Stubs: these methods are not supposed to be implemented +// and only needed to satisfy interfaces. + +func (r *Router) AnalyzeQuery( + ctx context.Context, + req *connect.Request[querierv1.AnalyzeQueryRequest], +) (*connect.Response[querierv1.AnalyzeQueryResponse], error) { + if r.frontend != nil { + return r.frontend.AnalyzeQuery(ctx, req) + } + return connect.NewResponse(&querierv1.AnalyzeQueryResponse{}), nil +} + +func (r *Router) GetProfileStats( + ctx context.Context, + req *connect.Request[typesv1.GetProfileStatsRequest], +) (*connect.Response[typesv1.GetProfileStatsResponse], error) { + if r.frontend != nil { + return r.frontend.GetProfileStats(ctx, req) + } + return connect.NewResponse(&typesv1.GetProfileStatsResponse{}), nil +} + +func (r *Router) ProfileTypes( + ctx context.Context, + req *connect.Request[querierv1.ProfileTypesRequest], +) (*connect.Response[querierv1.ProfileTypesResponse], error) { + if r.frontend != nil { + return r.frontend.ProfileTypes(ctx, req) + } + return connect.NewResponse(&querierv1.ProfileTypesResponse{}), nil +} diff --git a/pkg/frontend/read_path/read_path.go b/pkg/frontend/read_path/read_path.go new file mode 100644 index 0000000000..673f191047 --- /dev/null +++ b/pkg/frontend/read_path/read_path.go @@ -0,0 +1,20 @@ +package read_path + +import ( + "flag" + "time" + + "github.com/grafana/dskit/flagext" +) + +type Config struct { + EnableQueryBackend bool `yaml:"enable_query_backend" json:"enable_query_backend" doc:"hidden"` + EnableQueryBackendFrom time.Time `yaml:"enable_query_backend_from" json:"enable_query_backend_from" doc:"hidden"` +} + +func (o *Config) RegisterFlags(f *flag.FlagSet) { + f.BoolVar(&o.EnableQueryBackend, "enable-query-backend", false, + "This parameter specifies whether the new query backend is enabled.") + f.Var((*flagext.Time)(&o.EnableQueryBackendFrom), "enable-query-backend-from", + "This parameter specifies the point in time from which data is queried from the new query backend.") +} diff --git a/pkg/frontend/read_path/read_path_test.go b/pkg/frontend/read_path/read_path_test.go new file mode 100644 index 0000000000..cdbdf461c3 --- /dev/null +++ b/pkg/frontend/read_path/read_path_test.go @@ -0,0 +1,195 @@ +package read_path + +import ( + "context" + "io" + "math" + "testing" + "time" + + "connectrpc.com/connect" + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + "github.com/grafana/pyroscope/pkg/tenant" + "github.com/grafana/pyroscope/pkg/test/mocks/mockquerierv1connect" +) + +type routerTestSuite struct { + suite.Suite + + router *Router + logger log.Logger + registry *prometheus.Registry + + overrides *mockOverrides + frontend *mockquerierv1connect.MockQuerierServiceClient + backend *mockquerierv1connect.MockQuerierServiceClient + + ctx context.Context +} + +type mockOverrides struct{ mock.Mock } + +func (m *mockOverrides) ReadPathOverrides(tenantID string) Config { + args := m.Called(tenantID) + return args.Get(0).(Config) +} + +func (s *routerTestSuite) SetupTest() { + s.logger = 
log.NewLogfmtLogger(io.Discard) + s.registry = prometheus.NewRegistry() + s.overrides = new(mockOverrides) + s.frontend = new(mockquerierv1connect.MockQuerierServiceClient) + s.backend = new(mockquerierv1connect.MockQuerierServiceClient) + s.router = NewRouter( + s.logger, + s.overrides, + s.frontend, + s.backend, + ) + s.ctx = tenant.InjectTenantID(context.Background(), "tenant-a") +} + +func (s *routerTestSuite) BeforeTest(_, _ string) {} + +func (s *routerTestSuite) AfterTest(_, _ string) { + s.overrides.AssertExpectations(s.T()) + s.frontend.AssertExpectations(s.T()) + s.backend.AssertExpectations(s.T()) +} + +func TestRouterSuite(t *testing.T) { suite.Run(t, new(routerTestSuite)) } + +func (s *routerTestSuite) Test_FrontendOnly() { + s.overrides.On("ReadPathOverrides", "tenant-a").Return(Config{EnableQueryBackend: false}) + + expected := connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"foo", "bar"}}) + s.frontend.On("LabelNames", mock.Anything, mock.Anything).Return(expected, nil).Once() + + resp, err := s.router.LabelNames(s.ctx, connect.NewRequest(&typesv1.LabelNamesRequest{})) + s.Require().NoError(err) + s.Assert().Equal(expected, resp) +} + +func (s *routerTestSuite) Test_BackendOnly() { + s.overrides.On("ReadPathOverrides", "tenant-a").Return(Config{EnableQueryBackend: true}) + + expected := connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"foo", "bar"}}) + s.backend.On("LabelNames", mock.Anything, mock.Anything).Return(expected, nil).Once() + + resp, err := s.router.LabelNames(s.ctx, connect.NewRequest(&typesv1.LabelNamesRequest{})) + s.Require().NoError(err) + s.Assert().Equal(expected, resp) +} + +func (s *routerTestSuite) Test_Combined() { + s.overrides.On("ReadPathOverrides", "tenant-a").Return(Config{ + EnableQueryBackend: true, + EnableQueryBackendFrom: time.Unix(20, 0), + }) + + req1 := connect.NewRequest(&typesv1.LabelNamesRequest{Start: 10, End: 19999}) + resp1 := connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"foo", "bar"}}) + s.frontend.On("LabelNames", mock.Anything, req1).Return(resp1, nil).Once() + + req2 := connect.NewRequest(&typesv1.LabelNamesRequest{Start: 20000, End: math.MaxInt64}) + resp2 := connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"baz", "foo", "qux"}}) + s.backend.On("LabelNames", mock.Anything, req2).Return(resp2, nil).Once() + + expected := connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"bar", "baz", "foo", "qux"}}) + resp, err := s.router.LabelNames(s.ctx, connect.NewRequest(&typesv1.LabelNamesRequest{ + Start: 10, + End: math.MaxInt64, + })) + + s.Require().NoError(err) + s.Assert().Equal(expected, resp) +} + +func (s *routerTestSuite) Test_Combined_BeforeSplit() { + s.overrides.On("ReadPathOverrides", "tenant-a").Return(Config{ + EnableQueryBackend: true, + EnableQueryBackendFrom: time.Unix(20, 0), + }) + + expected := connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"foo", "bar"}}) + req := connect.NewRequest(&typesv1.LabelNamesRequest{Start: 10, End: 10000}) + s.frontend.On("LabelNames", mock.Anything, req).Return(expected, nil).Once() + + resp, err := s.router.LabelNames(s.ctx, req) + s.Require().NoError(err) + s.Assert().Equal(expected, resp) +} + +func (s *routerTestSuite) Test_Combined_AfterSplit() { + s.overrides.On("ReadPathOverrides", "tenant-a").Return(Config{ + EnableQueryBackend: true, + EnableQueryBackendFrom: time.Unix(20, 0), + }) + + expected := connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"foo", "bar"}}) + req := 
connect.NewRequest(&typesv1.LabelNamesRequest{Start: 30000, End: 40000}) + s.backend.On("LabelNames", mock.Anything, req).Return(expected, nil).Once() + + resp, err := s.router.LabelNames(s.ctx, req) + s.Require().NoError(err) + s.Assert().Equal(expected, resp) +} + +func (s *routerTestSuite) Test_LabelNames() { + s.overrides.On("ReadPathOverrides", "tenant-a").Return(Config{ + EnableQueryBackend: true, + EnableQueryBackendFrom: time.Unix(5, 0), + }) + + req := connect.NewRequest(&typesv1.LabelNamesRequest{Start: 10, End: 10000}) + expected := connect.NewResponse(&typesv1.LabelNamesResponse{Names: []string{"bar", "foo"}}) + s.frontend.On("LabelNames", mock.Anything, mock.Anything).Return(expected, nil).Once() + s.backend.On("LabelNames", mock.Anything, mock.Anything).Return(expected, nil).Once() + + resp, err := s.router.LabelNames(s.ctx, req) + s.Require().NoError(err) + s.Assert().Equal(expected, resp) +} + +func (s *routerTestSuite) Test_LabelValues() { + s.overrides.On("ReadPathOverrides", "tenant-a").Return(Config{ + EnableQueryBackend: true, + EnableQueryBackendFrom: time.Unix(5, 0), + }) + + req := connect.NewRequest(&typesv1.LabelValuesRequest{Start: 10, End: 10000}) + expected := connect.NewResponse(&typesv1.LabelValuesResponse{Names: []string{"bar", "foo"}}) + s.frontend.On("LabelValues", mock.Anything, mock.Anything).Return(expected, nil).Once() + s.backend.On("LabelValues", mock.Anything, mock.Anything).Return(expected, nil).Once() + + resp, err := s.router.LabelValues(s.ctx, req) + s.Require().NoError(err) + s.Assert().Equal(expected, resp) +} + +func (s *routerTestSuite) Test_Series() { + s.overrides.On("ReadPathOverrides", "tenant-a").Return(Config{ + EnableQueryBackend: true, + EnableQueryBackendFrom: time.Unix(5, 0), + }) + + req := connect.NewRequest(&querierv1.SeriesRequest{Start: 10, End: 10000}) + expected := connect.NewResponse(&querierv1.SeriesResponse{ + LabelsSet: []*typesv1.Labels{ + {Labels: []*typesv1.LabelPair{{Name: "foo", Value: "bar"}}}, + }, + }) + + s.frontend.On("Series", mock.Anything, mock.Anything).Return(expected, nil).Once() + s.backend.On("Series", mock.Anything, mock.Anything).Return(expected, nil).Once() + + resp, err := s.router.Series(s.ctx, req) + s.Require().NoError(err) + s.Assert().Equal(expected, resp) +} diff --git a/pkg/frontend/read_path/router.go b/pkg/frontend/read_path/router.go new file mode 100644 index 0000000000..765b4d7311 --- /dev/null +++ b/pkg/frontend/read_path/router.go @@ -0,0 +1,159 @@ +package read_path + +import ( + "context" + "time" + + "connectrpc.com/connect" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/tenant" + "github.com/prometheus/common/model" + "golang.org/x/sync/errgroup" + + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1/querierv1connect" + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" + phlaremodel "github.com/grafana/pyroscope/pkg/model" +) + +type Overrides interface { + ReadPathOverrides(tenantID string) Config +} + +// Router is a proxy that routes queries to the querier frontend +// or the backend querier service directly, bypassing the scheduler +// and querier services. 
+type Router struct { + logger log.Logger + overrides Overrides + + frontend querierv1connect.QuerierServiceClient + backend querierv1connect.QuerierServiceClient +} + +func NewRouter( + logger log.Logger, + overrides Overrides, + frontend querierv1connect.QuerierServiceClient, + backend querierv1connect.QuerierServiceClient, +) *Router { + return &Router{ + logger: logger, + overrides: overrides, + frontend: frontend, + backend: backend, + } +} + +func Query[Req, Resp any]( + ctx context.Context, + router *Router, + req *connect.Request[Req], + aggregate func(a, b *Resp) (*Resp, error), +) (*connect.Response[Resp], error) { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, connect.NewError(connect.CodeInvalidArgument, err) + } + if len(tenantIDs) != 1 { + level.Warn(router.logger).Log("msg", "ignoring inter-tenant query overrides", "tenants", tenantIDs) + } + tenantID := tenantIDs[0] + + // Verbose but explicit. Note that limits, error handling, etc., + // are delegated to the callee. + overrides := router.overrides.ReadPathOverrides(tenantID) + if !overrides.EnableQueryBackend { + return query[Req, Resp](ctx, router.frontend, req) + } + // Note: the old read path includes both start and end: [start, end]. + // The new read path does not include end: [start, end). + split := model.TimeFromUnixNano(overrides.EnableQueryBackendFrom.UnixNano()) + queryRange := phlaremodel.GetSafeTimeRange(time.Now(), req.Msg) + if split.After(queryRange.End) { + return query[Req, Resp](ctx, router.frontend, req) + } + if split.Before(queryRange.Start) { + return query[Req, Resp](ctx, router.backend, req) + } + + // We need to send requests both to the old and new read paths: + // [start, split](split, end), which translates to + // [start, split-1][split, end). 
+ c, ok := (any)(req.Msg).(interface{ CloneVT() *Req }) + if !ok { + return nil, connect.NewError(connect.CodeUnimplemented, nil) + } + cloned := c.CloneVT() + phlaremodel.SetTimeRange(req.Msg, queryRange.Start, split-1) + phlaremodel.SetTimeRange(cloned, split, queryRange.End) + + var a, b *connect.Response[Resp] + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { + var err error + a, err = query[Req, Resp](ctx, router.frontend, req) + return err + }) + g.Go(func() error { + var err error + b, err = query[Req, Resp](ctx, router.backend, connect.NewRequest(cloned)) + return err + }) + if err = g.Wait(); err != nil { + return nil, err + } + + resp, err := aggregate(a.Msg, b.Msg) + if err != nil || resp == nil { + return nil, err + } + + return connect.NewResponse(resp), nil +} + +func query[Req, Resp any]( + ctx context.Context, + svc querierv1connect.QuerierServiceClient, + req *connect.Request[Req], +) (*connect.Response[Resp], error) { + var resp any + var err error + + switch r := (any)(req).(type) { + case *connect.Request[querierv1.ProfileTypesRequest]: + resp, err = svc.ProfileTypes(ctx, r) + case *connect.Request[typesv1.GetProfileStatsRequest]: + resp, err = svc.GetProfileStats(ctx, r) + case *connect.Request[querierv1.AnalyzeQueryRequest]: + resp, err = svc.AnalyzeQuery(ctx, r) + + case *connect.Request[typesv1.LabelNamesRequest]: + resp, err = svc.LabelNames(ctx, r) + case *connect.Request[typesv1.LabelValuesRequest]: + resp, err = svc.LabelValues(ctx, r) + case *connect.Request[querierv1.SeriesRequest]: + resp, err = svc.Series(ctx, r) + + case *connect.Request[querierv1.SelectMergeStacktracesRequest]: + resp, err = svc.SelectMergeStacktraces(ctx, r) + case *connect.Request[querierv1.SelectMergeSpanProfileRequest]: + resp, err = svc.SelectMergeSpanProfile(ctx, r) + case *connect.Request[querierv1.SelectMergeProfileRequest]: + resp, err = svc.SelectMergeProfile(ctx, r) + case *connect.Request[querierv1.SelectSeriesRequest]: + resp, err = svc.SelectSeries(ctx, r) + case *connect.Request[querierv1.DiffRequest]: + resp, err = svc.Diff(ctx, r) + + default: + return nil, connect.NewError(connect.CodeUnimplemented, nil) + } + + if err != nil || resp == nil { + return nil, err + } + + return resp.(*connect.Response[Resp]), nil +} diff --git a/pkg/model/flamegraph_diff.go b/pkg/model/flamegraph_diff.go index 852aa90fbd..4194b8935c 100644 --- a/pkg/model/flamegraph_diff.go +++ b/pkg/model/flamegraph_diff.go @@ -146,6 +146,18 @@ func NewFlamegraphDiff(left, right *Tree, maxNodes int64) (*querierv1.FlameGraph return res, nil } +func NewFlamegraphDiffFromBytes(left, right []byte, maxNodes int64) (*querierv1.FlameGraphDiff, error) { + l, err := UnmarshalTree(left) + if err != nil { + return nil, err + } + r, err := UnmarshalTree(right) + if err != nil { + return nil, err + } + return NewFlamegraphDiff(l, r, maxNodes) +} + // combineTree aligns 2 trees by making them having the same structure with the // same number of nodes // It also makes the tree have a single root diff --git a/pkg/model/time.go b/pkg/model/time.go index 21bb02f2c6..975684d091 100644 --- a/pkg/model/time.go +++ b/pkg/model/time.go @@ -1,6 +1,11 @@ package model -import "github.com/prometheus/common/model" +import ( + "reflect" + "time" + + "github.com/prometheus/common/model" +) // TimeRangeRequest is a request that has a time interval. 
type TimeRangeRequest interface { @@ -19,3 +24,34 @@ func GetTimeRange(req TimeRangeRequest) (model.Interval, bool) { End: model.Time(req.GetEnd()), }, true } + +func GetSafeTimeRange(now time.Time, req any) model.Interval { + if r, ok := req.(TimeRangeRequest); ok { + x, ok := GetTimeRange(r) + if ok { + return x + } + } + return model.Interval{ + Start: model.Time(now.Add(-time.Hour).UnixMilli()), + End: model.Time(now.UnixMilli()), + } +} + +func SetTimeRange(r interface{}, startTime, endTime model.Time) bool { + const startFieldName = "Start" + const endFieldName = "End" + defer func() { _ = recover() }() + v := reflect.ValueOf(r).Elem() + startField := v.FieldByName(startFieldName) + endField := v.FieldByName(endFieldName) + if !startField.IsValid() || !endField.IsValid() { + return false + } + if startField.Kind() != reflect.Int64 || endField.Kind() != reflect.Int64 { + return false + } + startField.SetInt(int64(startTime)) + endField.SetInt(int64(endTime)) + return true +} diff --git a/pkg/model/time_test.go b/pkg/model/time_test.go new file mode 100644 index 0000000000..c9957b6308 --- /dev/null +++ b/pkg/model/time_test.go @@ -0,0 +1,22 @@ +package model + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" +) + +func Test_SetTimeRange(t *testing.T) { + t.Run("Type has time range fields", func(t *testing.T) { + r := new(typesv1.LabelNamesRequest) + assert.True(t, SetTimeRange(r, 1, 2)) + assert.Equal(t, int64(1), r.Start) + assert.Equal(t, int64(2), r.End) + }) + t.Run("Type has no time range fields", func(t *testing.T) { + r := new(struct{}) + assert.False(t, SetTimeRange(r, 1, 2)) + }) +} diff --git a/pkg/phlare/modules.go b/pkg/phlare/modules.go index 47321be828..4ae954f72d 100644 --- a/pkg/phlare/modules.go +++ b/pkg/phlare/modules.go @@ -40,12 +40,15 @@ import ( "github.com/grafana/pyroscope/pkg/distributor" "github.com/grafana/pyroscope/pkg/embedded/grafana" "github.com/grafana/pyroscope/pkg/frontend" + readpath "github.com/grafana/pyroscope/pkg/frontend/read_path" + queryfrontend "github.com/grafana/pyroscope/pkg/frontend/read_path/query_frontend" "github.com/grafana/pyroscope/pkg/ingester" objstoreclient "github.com/grafana/pyroscope/pkg/objstore/client" "github.com/grafana/pyroscope/pkg/objstore/providers/filesystem" "github.com/grafana/pyroscope/pkg/operations" phlarecontext "github.com/grafana/pyroscope/pkg/phlare/context" "github.com/grafana/pyroscope/pkg/querier" + "github.com/grafana/pyroscope/pkg/querier/vcs" "github.com/grafana/pyroscope/pkg/querier/worker" "github.com/grafana/pyroscope/pkg/scheduler" "github.com/grafana/pyroscope/pkg/settings" @@ -116,15 +119,44 @@ func (f *Phlare) initQueryFrontend() (services.Service, error) { if err != nil { return nil, err } - - f.API.RegisterPyroscopeHandlers(frontendSvc) - f.API.RegisterQueryFrontend(frontendSvc) - f.API.RegisterQuerier(frontendSvc) f.frontend = frontendSvc + f.API.RegisterFrontendForQuerierHandler(frontendSvc) + if !f.Cfg.v2Experiment { + f.API.RegisterQuerierServiceHandler(frontendSvc) + f.API.RegisterPyroscopeHandlers(frontendSvc) + f.API.RegisterVCSServiceHandler(frontendSvc) + } else { + f.initReadPathRouter() + } return frontendSvc, nil } +func (f *Phlare) initReadPathRouter() { + vcsService := vcs.New( + log.With(f.logger, "component", "vcs-service"), + f.reg, + ) + + newFrontend := queryfrontend.NewQueryFrontend( + log.With(f.logger, "component", "query-frontend"), + f.Overrides, + f.metastoreClient, + 
f.queryBackendClient, + ) + + router := readpath.NewRouter( + log.With(f.logger, "component", "read-path-router"), + f.Overrides, + f.frontend, + newFrontend, + ) + + f.API.RegisterQuerierServiceHandler(router) + f.API.RegisterPyroscopeHandlers(router) + f.API.RegisterVCSServiceHandler(vcsService) +} + func (f *Phlare) initRuntimeConfig() (services.Service, error) { if len(f.Cfg.RuntimeConfig.LoadPath) == 0 { // no need to initialize module if load path is empty @@ -279,7 +311,8 @@ func (f *Phlare) initQuerier() (services.Service, error) { if !f.isModuleActive(QueryFrontend) { f.API.RegisterPyroscopeHandlers(querierSvc) - f.API.RegisterQuerier(querierSvc) + f.API.RegisterQuerierServiceHandler(querierSvc) + f.API.RegisterVCSServiceHandler(querierSvc) } qWorker, err := worker.NewQuerierWorker(f.Cfg.Worker, querier.NewGRPCHandler(querierSvc), log.With(f.logger, "component", "querier-worker"), f.reg) if err != nil { diff --git a/pkg/phlare/modules_experimental.go b/pkg/phlare/modules_experimental.go index ebfb4c54cf..e9cada43f3 100644 --- a/pkg/phlare/modules_experimental.go +++ b/pkg/phlare/modules_experimental.go @@ -12,8 +12,8 @@ import ( segmentwriterclient "github.com/grafana/pyroscope/pkg/experiment/ingester/client" "github.com/grafana/pyroscope/pkg/experiment/metastore" metastoreclient "github.com/grafana/pyroscope/pkg/experiment/metastore/client" - "github.com/grafana/pyroscope/pkg/experiment/querybackend" - querybackendclient "github.com/grafana/pyroscope/pkg/experiment/querybackend/client" + querybackend "github.com/grafana/pyroscope/pkg/experiment/query_backend" + querybackendclient "github.com/grafana/pyroscope/pkg/experiment/query_backend/client" "github.com/grafana/pyroscope/pkg/util/health" ) diff --git a/pkg/phlare/phlare.go b/pkg/phlare/phlare.go index 2beff5a191..ed47fcde70 100644 --- a/pkg/phlare/phlare.go +++ b/pkg/phlare/phlare.go @@ -49,8 +49,8 @@ import ( segmentwriterclient "github.com/grafana/pyroscope/pkg/experiment/ingester/client" "github.com/grafana/pyroscope/pkg/experiment/metastore" metastoreclient "github.com/grafana/pyroscope/pkg/experiment/metastore/client" - "github.com/grafana/pyroscope/pkg/experiment/querybackend" - querybackendclient "github.com/grafana/pyroscope/pkg/experiment/querybackend/client" + querybackend "github.com/grafana/pyroscope/pkg/experiment/query_backend" + querybackendclient "github.com/grafana/pyroscope/pkg/experiment/query_backend/client" "github.com/grafana/pyroscope/pkg/frontend" "github.com/grafana/pyroscope/pkg/ingester" phlareobj "github.com/grafana/pyroscope/pkg/objstore" @@ -188,26 +188,32 @@ func (c *Config) registerServerFlagsWithChangedDefaultValues(fs *flag.FlagSet) { c.Worker.RegisterFlags(throwaway) c.OverridesExporter.RegisterFlags(throwaway, log.NewLogfmtLogger(os.Stderr)) + overrides := map[string]string{ + "server.http-listen-port": "4040", + "distributor.replication-factor": "1", + "query-scheduler.service-discovery-mode": schedulerdiscovery.ModeRing, + } + c.v2Experiment = os.Getenv("PYROSCOPE_V2_EXPERIMENT") != "" if c.v2Experiment { + for k, v := range map[string]string{ + "server.grpc-max-recv-msg-size-bytes": "104857600", + "server.grpc-max-send-msg-size-bytes": "104857600", + "server.grpc.keepalive.min-time-between-pings": "1s", + "segment-writer.grpc-client-config.connect-timeout": "1s", + "segment-writer.num-tokens": "4", + "segment-writer.heartbeat-timeout": "1m", + "segment-writer.unregister-on-shutdown": "false", + } { + overrides[k] = v + } + c.Metastore.RegisterFlags(throwaway) 
c.SegmentWriter.RegisterFlags(throwaway) c.QueryBackend.RegisterFlags(throwaway) c.CompactionWorker.RegisterFlags(throwaway) c.LimitsConfig.WritePathOverrides.RegisterFlags(throwaway) - } - - overrides := map[string]string{ - "server.http-listen-port": "4040", - "distributor.replication-factor": "1", - "query-scheduler.service-discovery-mode": schedulerdiscovery.ModeRing, - "server.grpc-max-recv-msg-size-bytes": "104857600", - "server.grpc-max-send-msg-size-bytes": "104857600", - "server.grpc.keepalive.min-time-between-pings": "1s", - "segment-writer.grpc-client-config.connect-timeout": "1s", - "segment-writer.num-tokens": "4", - "segment-writer.heartbeat-timeout": "1m", - "segment-writer.unregister-on-shutdown": "false", + c.LimitsConfig.ReadPathOverrides.RegisterFlags(throwaway) } throwaway.VisitAll(func(f *flag.Flag) { diff --git a/pkg/pprof/merge.go b/pkg/pprof/merge.go index b0da12f061..f162f32990 100644 --- a/pkg/pprof/merge.go +++ b/pkg/pprof/merge.go @@ -89,6 +89,14 @@ func (m *ProfileMerge) Merge(p *profilev1.Profile) error { return nil } +func (m *ProfileMerge) MergeBytes(b []byte) error { + var p profilev1.Profile + if err := Unmarshal(b, &p); err != nil { + return err + } + return m.Merge(&p) +} + func (m *ProfileMerge) Profile() *profilev1.Profile { if m.profile == nil { return &profilev1.Profile{ diff --git a/pkg/pprof/pprof.go b/pkg/pprof/pprof.go index ef4fda5b01..3d2d0ca8d8 100644 --- a/pkg/pprof/pprof.go +++ b/pkg/pprof/pprof.go @@ -1157,6 +1157,14 @@ func Marshal(p *profilev1.Profile, compress bool) ([]byte, error) { return buf.Bytes(), nil } +func MustMarshal(p *profilev1.Profile, compress bool) []byte { + b, err := Marshal(p, compress) + if err != nil { + panic(err) + } + return b +} + func Unmarshal(data []byte, p *profilev1.Profile) error { gr := gzipReaderPool.Get().(*gzipReader) defer gzipReaderPool.Put(gr) diff --git a/pkg/querier/vcs/client/metrics.go b/pkg/querier/vcs/client/metrics.go index f8b28c95ef..7706737e8b 100644 --- a/pkg/querier/vcs/client/metrics.go +++ b/pkg/querier/vcs/client/metrics.go @@ -9,7 +9,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" "github.com/grafana/pyroscope/pkg/util" ) @@ -31,7 +30,7 @@ var ( ) func InstrumentedHTTPClient(logger log.Logger, reg prometheus.Registerer) *http.Client { - apiDuration := promauto.With(reg).NewHistogramVec( + apiDuration := util.RegisterOrGet(reg, prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: "pyroscope", Name: "vcs_github_request_duration", @@ -39,7 +38,7 @@ func InstrumentedHTTPClient(logger log.Logger, reg prometheus.Registerer) *http. Buckets: prometheus.ExponentialBucketsRange(0.1, 10, 8), }, []string{"method", "route", "status_code"}, - ) + )) defaultClient := &http.Client{ Timeout: 10 * time.Second, diff --git a/pkg/test/mocks/mockquerierv1connect/mock_querier_service_client.go b/pkg/test/mocks/mockquerierv1connect/mock_querier_service_client.go new file mode 100644 index 0000000000..4e0a442c69 --- /dev/null +++ b/pkg/test/mocks/mockquerierv1connect/mock_querier_service_client.go @@ -0,0 +1,693 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mockquerierv1connect + +import ( + context "context" + + connect "connectrpc.com/connect" + + googlev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1" + + mock "github.com/stretchr/testify/mock" + + querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1" + + typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1" +) + +// MockQuerierServiceClient is an autogenerated mock type for the QuerierServiceClient type +type MockQuerierServiceClient struct { + mock.Mock +} + +type MockQuerierServiceClient_Expecter struct { + mock *mock.Mock +} + +func (_m *MockQuerierServiceClient) EXPECT() *MockQuerierServiceClient_Expecter { + return &MockQuerierServiceClient_Expecter{mock: &_m.Mock} +} + +// AnalyzeQuery provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) AnalyzeQuery(_a0 context.Context, _a1 *connect.Request[querierv1.AnalyzeQueryRequest]) (*connect.Response[querierv1.AnalyzeQueryResponse], error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for AnalyzeQuery") + } + + var r0 *connect.Response[querierv1.AnalyzeQueryResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.AnalyzeQueryRequest]) (*connect.Response[querierv1.AnalyzeQueryResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.AnalyzeQueryRequest]) *connect.Response[querierv1.AnalyzeQueryResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[querierv1.AnalyzeQueryResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[querierv1.AnalyzeQueryRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_AnalyzeQuery_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AnalyzeQuery' +type MockQuerierServiceClient_AnalyzeQuery_Call struct { + *mock.Call +} + +// AnalyzeQuery is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *connect.Request[querierv1.AnalyzeQueryRequest] +func (_e *MockQuerierServiceClient_Expecter) AnalyzeQuery(_a0 interface{}, _a1 interface{}) *MockQuerierServiceClient_AnalyzeQuery_Call { + return &MockQuerierServiceClient_AnalyzeQuery_Call{Call: _e.mock.On("AnalyzeQuery", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_AnalyzeQuery_Call) Run(run func(_a0 context.Context, _a1 *connect.Request[querierv1.AnalyzeQueryRequest])) *MockQuerierServiceClient_AnalyzeQuery_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[querierv1.AnalyzeQueryRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_AnalyzeQuery_Call) Return(_a0 *connect.Response[querierv1.AnalyzeQueryResponse], _a1 error) *MockQuerierServiceClient_AnalyzeQuery_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_AnalyzeQuery_Call) RunAndReturn(run func(context.Context, *connect.Request[querierv1.AnalyzeQueryRequest]) (*connect.Response[querierv1.AnalyzeQueryResponse], error)) *MockQuerierServiceClient_AnalyzeQuery_Call { + _c.Call.Return(run) + return _c +} + +// Diff provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) Diff(_a0 context.Context, _a1 *connect.Request[querierv1.DiffRequest]) (*connect.Response[querierv1.DiffResponse], error) { + ret := _m.Called(_a0, _a1) 
+ + if len(ret) == 0 { + panic("no return value specified for Diff") + } + + var r0 *connect.Response[querierv1.DiffResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.DiffRequest]) (*connect.Response[querierv1.DiffResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.DiffRequest]) *connect.Response[querierv1.DiffResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[querierv1.DiffResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[querierv1.DiffRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_Diff_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Diff' +type MockQuerierServiceClient_Diff_Call struct { + *mock.Call +} + +// Diff is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *connect.Request[querierv1.DiffRequest] +func (_e *MockQuerierServiceClient_Expecter) Diff(_a0 interface{}, _a1 interface{}) *MockQuerierServiceClient_Diff_Call { + return &MockQuerierServiceClient_Diff_Call{Call: _e.mock.On("Diff", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_Diff_Call) Run(run func(_a0 context.Context, _a1 *connect.Request[querierv1.DiffRequest])) *MockQuerierServiceClient_Diff_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[querierv1.DiffRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_Diff_Call) Return(_a0 *connect.Response[querierv1.DiffResponse], _a1 error) *MockQuerierServiceClient_Diff_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_Diff_Call) RunAndReturn(run func(context.Context, *connect.Request[querierv1.DiffRequest]) (*connect.Response[querierv1.DiffResponse], error)) *MockQuerierServiceClient_Diff_Call { + _c.Call.Return(run) + return _c +} + +// GetProfileStats provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) GetProfileStats(_a0 context.Context, _a1 *connect.Request[typesv1.GetProfileStatsRequest]) (*connect.Response[typesv1.GetProfileStatsResponse], error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetProfileStats") + } + + var r0 *connect.Response[typesv1.GetProfileStatsResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[typesv1.GetProfileStatsRequest]) (*connect.Response[typesv1.GetProfileStatsResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[typesv1.GetProfileStatsRequest]) *connect.Response[typesv1.GetProfileStatsResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[typesv1.GetProfileStatsResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[typesv1.GetProfileStatsRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_GetProfileStats_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProfileStats' +type MockQuerierServiceClient_GetProfileStats_Call struct { + *mock.Call +} + +// GetProfileStats is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 
*connect.Request[typesv1.GetProfileStatsRequest] +func (_e *MockQuerierServiceClient_Expecter) GetProfileStats(_a0 interface{}, _a1 interface{}) *MockQuerierServiceClient_GetProfileStats_Call { + return &MockQuerierServiceClient_GetProfileStats_Call{Call: _e.mock.On("GetProfileStats", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_GetProfileStats_Call) Run(run func(_a0 context.Context, _a1 *connect.Request[typesv1.GetProfileStatsRequest])) *MockQuerierServiceClient_GetProfileStats_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[typesv1.GetProfileStatsRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_GetProfileStats_Call) Return(_a0 *connect.Response[typesv1.GetProfileStatsResponse], _a1 error) *MockQuerierServiceClient_GetProfileStats_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_GetProfileStats_Call) RunAndReturn(run func(context.Context, *connect.Request[typesv1.GetProfileStatsRequest]) (*connect.Response[typesv1.GetProfileStatsResponse], error)) *MockQuerierServiceClient_GetProfileStats_Call { + _c.Call.Return(run) + return _c +} + +// LabelNames provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) LabelNames(_a0 context.Context, _a1 *connect.Request[typesv1.LabelNamesRequest]) (*connect.Response[typesv1.LabelNamesResponse], error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for LabelNames") + } + + var r0 *connect.Response[typesv1.LabelNamesResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[typesv1.LabelNamesRequest]) (*connect.Response[typesv1.LabelNamesResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[typesv1.LabelNamesRequest]) *connect.Response[typesv1.LabelNamesResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[typesv1.LabelNamesResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[typesv1.LabelNamesRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_LabelNames_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LabelNames' +type MockQuerierServiceClient_LabelNames_Call struct { + *mock.Call +} + +// LabelNames is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *connect.Request[typesv1.LabelNamesRequest] +func (_e *MockQuerierServiceClient_Expecter) LabelNames(_a0 interface{}, _a1 interface{}) *MockQuerierServiceClient_LabelNames_Call { + return &MockQuerierServiceClient_LabelNames_Call{Call: _e.mock.On("LabelNames", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_LabelNames_Call) Run(run func(_a0 context.Context, _a1 *connect.Request[typesv1.LabelNamesRequest])) *MockQuerierServiceClient_LabelNames_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[typesv1.LabelNamesRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_LabelNames_Call) Return(_a0 *connect.Response[typesv1.LabelNamesResponse], _a1 error) *MockQuerierServiceClient_LabelNames_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_LabelNames_Call) RunAndReturn(run func(context.Context, *connect.Request[typesv1.LabelNamesRequest]) 
(*connect.Response[typesv1.LabelNamesResponse], error)) *MockQuerierServiceClient_LabelNames_Call { + _c.Call.Return(run) + return _c +} + +// LabelValues provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) LabelValues(_a0 context.Context, _a1 *connect.Request[typesv1.LabelValuesRequest]) (*connect.Response[typesv1.LabelValuesResponse], error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for LabelValues") + } + + var r0 *connect.Response[typesv1.LabelValuesResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[typesv1.LabelValuesRequest]) (*connect.Response[typesv1.LabelValuesResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[typesv1.LabelValuesRequest]) *connect.Response[typesv1.LabelValuesResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[typesv1.LabelValuesResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[typesv1.LabelValuesRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_LabelValues_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LabelValues' +type MockQuerierServiceClient_LabelValues_Call struct { + *mock.Call +} + +// LabelValues is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *connect.Request[typesv1.LabelValuesRequest] +func (_e *MockQuerierServiceClient_Expecter) LabelValues(_a0 interface{}, _a1 interface{}) *MockQuerierServiceClient_LabelValues_Call { + return &MockQuerierServiceClient_LabelValues_Call{Call: _e.mock.On("LabelValues", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_LabelValues_Call) Run(run func(_a0 context.Context, _a1 *connect.Request[typesv1.LabelValuesRequest])) *MockQuerierServiceClient_LabelValues_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[typesv1.LabelValuesRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_LabelValues_Call) Return(_a0 *connect.Response[typesv1.LabelValuesResponse], _a1 error) *MockQuerierServiceClient_LabelValues_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_LabelValues_Call) RunAndReturn(run func(context.Context, *connect.Request[typesv1.LabelValuesRequest]) (*connect.Response[typesv1.LabelValuesResponse], error)) *MockQuerierServiceClient_LabelValues_Call { + _c.Call.Return(run) + return _c +} + +// ProfileTypes provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) ProfileTypes(_a0 context.Context, _a1 *connect.Request[querierv1.ProfileTypesRequest]) (*connect.Response[querierv1.ProfileTypesResponse], error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for ProfileTypes") + } + + var r0 *connect.Response[querierv1.ProfileTypesResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.ProfileTypesRequest]) (*connect.Response[querierv1.ProfileTypesResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.ProfileTypesRequest]) *connect.Response[querierv1.ProfileTypesResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*connect.Response[querierv1.ProfileTypesResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[querierv1.ProfileTypesRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_ProfileTypes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProfileTypes' +type MockQuerierServiceClient_ProfileTypes_Call struct { + *mock.Call +} + +// ProfileTypes is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *connect.Request[querierv1.ProfileTypesRequest] +func (_e *MockQuerierServiceClient_Expecter) ProfileTypes(_a0 interface{}, _a1 interface{}) *MockQuerierServiceClient_ProfileTypes_Call { + return &MockQuerierServiceClient_ProfileTypes_Call{Call: _e.mock.On("ProfileTypes", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_ProfileTypes_Call) Run(run func(_a0 context.Context, _a1 *connect.Request[querierv1.ProfileTypesRequest])) *MockQuerierServiceClient_ProfileTypes_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[querierv1.ProfileTypesRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_ProfileTypes_Call) Return(_a0 *connect.Response[querierv1.ProfileTypesResponse], _a1 error) *MockQuerierServiceClient_ProfileTypes_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_ProfileTypes_Call) RunAndReturn(run func(context.Context, *connect.Request[querierv1.ProfileTypesRequest]) (*connect.Response[querierv1.ProfileTypesResponse], error)) *MockQuerierServiceClient_ProfileTypes_Call { + _c.Call.Return(run) + return _c +} + +// SelectMergeProfile provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) SelectMergeProfile(_a0 context.Context, _a1 *connect.Request[querierv1.SelectMergeProfileRequest]) (*connect.Response[googlev1.Profile], error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SelectMergeProfile") + } + + var r0 *connect.Response[googlev1.Profile] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.SelectMergeProfileRequest]) (*connect.Response[googlev1.Profile], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.SelectMergeProfileRequest]) *connect.Response[googlev1.Profile]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[googlev1.Profile]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[querierv1.SelectMergeProfileRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_SelectMergeProfile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SelectMergeProfile' +type MockQuerierServiceClient_SelectMergeProfile_Call struct { + *mock.Call +} + +// SelectMergeProfile is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *connect.Request[querierv1.SelectMergeProfileRequest] +func (_e *MockQuerierServiceClient_Expecter) SelectMergeProfile(_a0 interface{}, _a1 interface{}) *MockQuerierServiceClient_SelectMergeProfile_Call { + return &MockQuerierServiceClient_SelectMergeProfile_Call{Call: _e.mock.On("SelectMergeProfile", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_SelectMergeProfile_Call) Run(run func(_a0 
context.Context, _a1 *connect.Request[querierv1.SelectMergeProfileRequest])) *MockQuerierServiceClient_SelectMergeProfile_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[querierv1.SelectMergeProfileRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_SelectMergeProfile_Call) Return(_a0 *connect.Response[googlev1.Profile], _a1 error) *MockQuerierServiceClient_SelectMergeProfile_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_SelectMergeProfile_Call) RunAndReturn(run func(context.Context, *connect.Request[querierv1.SelectMergeProfileRequest]) (*connect.Response[googlev1.Profile], error)) *MockQuerierServiceClient_SelectMergeProfile_Call { + _c.Call.Return(run) + return _c +} + +// SelectMergeSpanProfile provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) SelectMergeSpanProfile(_a0 context.Context, _a1 *connect.Request[querierv1.SelectMergeSpanProfileRequest]) (*connect.Response[querierv1.SelectMergeSpanProfileResponse], error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SelectMergeSpanProfile") + } + + var r0 *connect.Response[querierv1.SelectMergeSpanProfileResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.SelectMergeSpanProfileRequest]) (*connect.Response[querierv1.SelectMergeSpanProfileResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.SelectMergeSpanProfileRequest]) *connect.Response[querierv1.SelectMergeSpanProfileResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[querierv1.SelectMergeSpanProfileResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[querierv1.SelectMergeSpanProfileRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_SelectMergeSpanProfile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SelectMergeSpanProfile' +type MockQuerierServiceClient_SelectMergeSpanProfile_Call struct { + *mock.Call +} + +// SelectMergeSpanProfile is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *connect.Request[querierv1.SelectMergeSpanProfileRequest] +func (_e *MockQuerierServiceClient_Expecter) SelectMergeSpanProfile(_a0 interface{}, _a1 interface{}) *MockQuerierServiceClient_SelectMergeSpanProfile_Call { + return &MockQuerierServiceClient_SelectMergeSpanProfile_Call{Call: _e.mock.On("SelectMergeSpanProfile", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_SelectMergeSpanProfile_Call) Run(run func(_a0 context.Context, _a1 *connect.Request[querierv1.SelectMergeSpanProfileRequest])) *MockQuerierServiceClient_SelectMergeSpanProfile_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[querierv1.SelectMergeSpanProfileRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_SelectMergeSpanProfile_Call) Return(_a0 *connect.Response[querierv1.SelectMergeSpanProfileResponse], _a1 error) *MockQuerierServiceClient_SelectMergeSpanProfile_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_SelectMergeSpanProfile_Call) RunAndReturn(run func(context.Context, *connect.Request[querierv1.SelectMergeSpanProfileRequest]) 
(*connect.Response[querierv1.SelectMergeSpanProfileResponse], error)) *MockQuerierServiceClient_SelectMergeSpanProfile_Call { + _c.Call.Return(run) + return _c +} + +// SelectMergeStacktraces provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) SelectMergeStacktraces(_a0 context.Context, _a1 *connect.Request[querierv1.SelectMergeStacktracesRequest]) (*connect.Response[querierv1.SelectMergeStacktracesResponse], error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SelectMergeStacktraces") + } + + var r0 *connect.Response[querierv1.SelectMergeStacktracesResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.SelectMergeStacktracesRequest]) (*connect.Response[querierv1.SelectMergeStacktracesResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.SelectMergeStacktracesRequest]) *connect.Response[querierv1.SelectMergeStacktracesResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[querierv1.SelectMergeStacktracesResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[querierv1.SelectMergeStacktracesRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_SelectMergeStacktraces_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SelectMergeStacktraces' +type MockQuerierServiceClient_SelectMergeStacktraces_Call struct { + *mock.Call +} + +// SelectMergeStacktraces is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *connect.Request[querierv1.SelectMergeStacktracesRequest] +func (_e *MockQuerierServiceClient_Expecter) SelectMergeStacktraces(_a0 interface{}, _a1 interface{}) *MockQuerierServiceClient_SelectMergeStacktraces_Call { + return &MockQuerierServiceClient_SelectMergeStacktraces_Call{Call: _e.mock.On("SelectMergeStacktraces", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_SelectMergeStacktraces_Call) Run(run func(_a0 context.Context, _a1 *connect.Request[querierv1.SelectMergeStacktracesRequest])) *MockQuerierServiceClient_SelectMergeStacktraces_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[querierv1.SelectMergeStacktracesRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_SelectMergeStacktraces_Call) Return(_a0 *connect.Response[querierv1.SelectMergeStacktracesResponse], _a1 error) *MockQuerierServiceClient_SelectMergeStacktraces_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_SelectMergeStacktraces_Call) RunAndReturn(run func(context.Context, *connect.Request[querierv1.SelectMergeStacktracesRequest]) (*connect.Response[querierv1.SelectMergeStacktracesResponse], error)) *MockQuerierServiceClient_SelectMergeStacktraces_Call { + _c.Call.Return(run) + return _c +} + +// SelectSeries provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) SelectSeries(_a0 context.Context, _a1 *connect.Request[querierv1.SelectSeriesRequest]) (*connect.Response[querierv1.SelectSeriesResponse], error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for SelectSeries") + } + + var r0 *connect.Response[querierv1.SelectSeriesResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, 
*connect.Request[querierv1.SelectSeriesRequest]) (*connect.Response[querierv1.SelectSeriesResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.SelectSeriesRequest]) *connect.Response[querierv1.SelectSeriesResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[querierv1.SelectSeriesResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[querierv1.SelectSeriesRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_SelectSeries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SelectSeries' +type MockQuerierServiceClient_SelectSeries_Call struct { + *mock.Call +} + +// SelectSeries is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *connect.Request[querierv1.SelectSeriesRequest] +func (_e *MockQuerierServiceClient_Expecter) SelectSeries(_a0 interface{}, _a1 interface{}) *MockQuerierServiceClient_SelectSeries_Call { + return &MockQuerierServiceClient_SelectSeries_Call{Call: _e.mock.On("SelectSeries", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_SelectSeries_Call) Run(run func(_a0 context.Context, _a1 *connect.Request[querierv1.SelectSeriesRequest])) *MockQuerierServiceClient_SelectSeries_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[querierv1.SelectSeriesRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_SelectSeries_Call) Return(_a0 *connect.Response[querierv1.SelectSeriesResponse], _a1 error) *MockQuerierServiceClient_SelectSeries_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_SelectSeries_Call) RunAndReturn(run func(context.Context, *connect.Request[querierv1.SelectSeriesRequest]) (*connect.Response[querierv1.SelectSeriesResponse], error)) *MockQuerierServiceClient_SelectSeries_Call { + _c.Call.Return(run) + return _c +} + +// Series provides a mock function with given fields: _a0, _a1 +func (_m *MockQuerierServiceClient) Series(_a0 context.Context, _a1 *connect.Request[querierv1.SeriesRequest]) (*connect.Response[querierv1.SeriesResponse], error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for Series") + } + + var r0 *connect.Response[querierv1.SeriesResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.SeriesRequest]) (*connect.Response[querierv1.SeriesResponse], error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[querierv1.SeriesRequest]) *connect.Response[querierv1.SeriesResponse]); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*connect.Response[querierv1.SeriesResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[querierv1.SeriesRequest]) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQuerierServiceClient_Series_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Series' +type MockQuerierServiceClient_Series_Call struct { + *mock.Call +} + +// Series is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *connect.Request[querierv1.SeriesRequest] +func (_e *MockQuerierServiceClient_Expecter) Series(_a0 interface{}, _a1 interface{}) 
*MockQuerierServiceClient_Series_Call { + return &MockQuerierServiceClient_Series_Call{Call: _e.mock.On("Series", _a0, _a1)} +} + +func (_c *MockQuerierServiceClient_Series_Call) Run(run func(_a0 context.Context, _a1 *connect.Request[querierv1.SeriesRequest])) *MockQuerierServiceClient_Series_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*connect.Request[querierv1.SeriesRequest])) + }) + return _c +} + +func (_c *MockQuerierServiceClient_Series_Call) Return(_a0 *connect.Response[querierv1.SeriesResponse], _a1 error) *MockQuerierServiceClient_Series_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQuerierServiceClient_Series_Call) RunAndReturn(run func(context.Context, *connect.Request[querierv1.SeriesRequest]) (*connect.Response[querierv1.SeriesResponse], error)) *MockQuerierServiceClient_Series_Call { + _c.Call.Return(run) + return _c +} + +// NewMockQuerierServiceClient creates a new instance of MockQuerierServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockQuerierServiceClient(t interface { + mock.TestingT + Cleanup(func()) +}) *MockQuerierServiceClient { + mock := &MockQuerierServiceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/util/config.go b/pkg/util/config.go index cbfb5a34d4..eaefd5173b 100644 --- a/pkg/util/config.go +++ b/pkg/util/config.go @@ -8,6 +8,7 @@ package util import ( "fmt" "reflect" + "time" ) func stringKeyMapToInterfaceKeyMap(m map[string]interface{}) map[interface{}]interface{} { @@ -60,6 +61,10 @@ func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[in if defaultValue != nil { output[key] = v } + case time.Time: + if defaultValue != nil && !v.IsZero() { + output[key] = v + } case map[interface{}]interface{}: defaultV, ok := defaultValue.(map[interface{}]interface{}) if !ok { diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index 4510b34a6b..f0be31d38d 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -11,6 +11,7 @@ import ( "gopkg.in/yaml.v3" writepath "github.com/grafana/pyroscope/pkg/distributor/write_path" + readpath "github.com/grafana/pyroscope/pkg/frontend/read_path" "github.com/grafana/pyroscope/pkg/phlaredb/block" ) @@ -101,6 +102,9 @@ type Limits struct { // Write path overrides used in the write path router. WritePathOverrides writepath.Config `yaml:",inline" json:",inline"` + + // Write path overrides used in the read path router. + ReadPathOverrides readpath.Config `yaml:",inline" json:",inline"` } // LimitError are errors that do not comply with the limits specified. 
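The file above is a standard mockery-generated client mock: every QuerierService RPC gets a typed *_Call wrapper whose Run, Return, and RunAndReturn helpers keep expectations type-safe, and NewMockQuerierServiceClient registers a cleanup that asserts all expectations when the test exits. A minimal usage sketch follows, assuming the usual EXPECT() accessor that mockery generates alongside the expecter type; the import paths, test name, and request values are illustrative and not part of this change.

package querier_test

import (
	"context"
	"testing"

	"connectrpc.com/connect" // assumed module path for the connect runtime used by the generated client
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1"
	// hypothetical import path for the generated mock shown above
	mockquerier "github.com/grafana/pyroscope/pkg/test/mocks/mockquerierv1connect"
)

func TestSelectSeries_sketch(t *testing.T) {
	// The constructor wires the mock to t and asserts expectations via t.Cleanup.
	client := mockquerier.NewMockQuerierServiceClient(t)

	// Typed expectation through the generated expecter API (assumes the standard EXPECT() accessor).
	client.EXPECT().
		SelectSeries(mock.Anything, mock.Anything).
		Return(connect.NewResponse(&querierv1.SelectSeriesResponse{}), nil)

	resp, err := client.SelectSeries(context.Background(),
		connect.NewRequest(&querierv1.SelectSeriesRequest{}))
	require.NoError(t, err)
	require.NotNil(t, resp.Msg)
}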
@@ -460,6 +464,10 @@ func (o *Overrides) WritePathOverrides(tenantID string) writepath.Config { return o.getOverridesForTenant(tenantID).WritePathOverrides } +func (o *Overrides) ReadPathOverrides(tenantID string) readpath.Config { + return o.getOverridesForTenant(tenantID).ReadPathOverrides +} + func (o *Overrides) DefaultLimits() *Limits { return o.defaultLimits } diff --git a/pkg/validation/validate.go b/pkg/validation/validate.go index ab7c0413f2..e17a36f7d2 100644 --- a/pkg/validation/validate.go +++ b/pkg/validation/validate.go @@ -370,6 +370,26 @@ func ValidateRangeRequest(limits RangeRequestLimits, tenantIDs []string, req mod return ValidatedRangeRequest{Interval: req}, nil } +func SanitizeTimeRange(limits RangeRequestLimits, tenant []string, start, end *int64) (empty bool, err error) { + var interval model.Interval + if start != nil { + interval.Start = model.Time(*start) + } + if end != nil { + interval.End = model.Time(*end) + } + validated, err := ValidateRangeRequest(limits, tenant, interval, model.Now()) + if err != nil { + return false, err + } + if validated.IsEmpty { + return true, nil + } + *start = int64(validated.Start) + *end = int64(validated.End) + return false, nil +} + type FlameGraphLimits interface { MaxFlameGraphNodesDefault(string) int MaxFlameGraphNodesMax(string) int
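SanitizeTimeRange is a thin wrapper over ValidateRangeRequest for callers that carry the query window as millisecond int64 pointers: it clamps the window to the tenant's range limits, writes the clamped bounds back through the pointers, and reports whether nothing is left to query. Note that both pointers are dereferenced on the success path, so callers are expected to pass non-nil values. A usage sketch under assumptions: the caller sits outside package validation, limits is any RangeRequestLimits implementation (the Overrides type above presumably qualifies), and the tenant ID is illustrative.

import "github.com/grafana/pyroscope/pkg/validation"

// clampRange is a hypothetical helper showing the intended call pattern.
func clampRange(limits validation.RangeRequestLimits, tenantID string, startMs, endMs int64) (int64, int64, bool, error) {
	empty, err := validation.SanitizeTimeRange(limits, []string{tenantID}, &startMs, &endMs)
	if err != nil {
		return 0, 0, false, err // the requested window violates the tenant's range limits
	}
	if empty {
		return 0, 0, true, nil // the window was clamped away entirely; skip the query
	}
	// startMs and endMs now hold the clamped bounds and can be placed on the downstream request.
	return startMs, endMs, false, nil
}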