From 973b8d382d11d83086de357e5c71d17d39a49663 Mon Sep 17 00:00:00 2001
From: Ryan Hall
Date: Thu, 4 Feb 2021 17:15:07 -0800
Subject: [PATCH 1/3] Remove disk series read limit

The plan is to replace this lookback limit with a new explicit
acquire/release permit API that is coming soon. For now, remove the
limit so we don't release it and then have to support it.
---
 .../operational_guide/resource_limits.md      |  28 +----
 src/cluster/generated/proto/kvpb/kv.pb.go     | 102 ++++--------------
 src/cluster/generated/proto/kvpb/kv.proto     |   2 +-
 .../services/m3dbnode/config/config_test.go   |   1 -
 src/cmd/services/m3dbnode/config/limits.go    |   6 --
 .../server/tchannelthrift/node/service.go     |  10 --
 .../tchannelthrift/node/service_test.go       |  11 +-
 src/dbnode/server/server.go                   |  14 ---
 src/dbnode/storage/limits/query_limits.go     |  14 ---
 .../storage/limits/query_limits_test.go       |  28 +----
 src/dbnode/storage/limits/types.go            |   2 -
 11 files changed, 29 insertions(+), 189 deletions(-)

diff --git a/site/content/operational_guide/resource_limits.md b/site/content/operational_guide/resource_limits.md
index 3b6041388c..e42ea88701 100644
--- a/site/content/operational_guide/resource_limits.md
+++ b/site/content/operational_guide/resource_limits.md
@@ -53,15 +53,6 @@ per second safely with your deployment and you want to use the default lookback
 of `15s` then you would multiply 10,000 by 15 to get 150,000 as a max value with
 a 15s lookback.
 
-The third limit `maxRecentlyQueriedSeriesDiskRead` caps the series IDs matched by incoming
-queries. This originally was distinct from the limit `maxRecentlyQueriedSeriesBlocks`, which
-also limits the memory cost of specific series matched, because of an inefficiency
-in how allocations would occur even for series known to not be present on disk for a given
-shard. This inefficiency has been resolved https://github.com/m3db/m3/pull/3103 and therefore
-this limit should be tracking memory cost linearly relative to `maxRecentlyQueriedSeriesBlocks`.
-It is recommended to defer to using `maxRecentlyQueriedSeriesBlocks` over
-`maxRecentlyQueriedSeriesDiskRead` given both should cap the resources similarly.
-
 ### Annotated configuration
 
 ```yaml
@@ -91,18 +82,6 @@ limits:
     # and read until the lookback period resets.
     lookback: 15s
 
-  # If set, will enforce a maximum on the series read from disk.
-  # This limit can be used to ensure queries that match an extremely high
-  # volume of series can be limited before even reading the underlying series data from disk.
-  maxRecentlyQueriedSeriesDiskRead:
-    # Value sets the maximum number of series read from disk.
-    value: 0
-    # Lookback sets the time window that this limit is enforced over, every
-    # lookback period the global count is reset to zero and when the limit
-    # is reached it will reject any further time series blocks being matched
-    # and read until the lookback period resets.
-    lookback: 15s
-
   # If set then will limit the number of parallel write batch requests to the
   # database and return errors if hit.
   maxOutstandingWriteRequests: 0
@@ -133,11 +112,6 @@ curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/kvstore -d '{
       "lookbackSeconds":15,
       "forceExceeded":false
     },
-    "maxRecentlyQueriedSeriesDiskRead": {
-      "limit":0,
-      "lookbackSeconds":15,
-      "forceExceeded":false
-    }
   },
   "commit":true
 }'
@@ -216,4 +190,4 @@ limits:
 
 The following headers can also be used to override configured limits on a per request basis (to allow for different limits dependent on caller):
 
-{{% fileinclude file="headers_optional_read_limits.md" %}}
\ No newline at end of file
+{{% fileinclude file="headers_optional_read_limits.md" %}}
diff --git a/src/cluster/generated/proto/kvpb/kv.pb.go b/src/cluster/generated/proto/kvpb/kv.pb.go
index d3c7c43f87..14c3d14a80 100644
--- a/src/cluster/generated/proto/kvpb/kv.pb.go
+++ b/src/cluster/generated/proto/kvpb/kv.pb.go
@@ -119,7 +119,6 @@ func (m *KeyValueUpdateResult) GetNew() string {
 type QueryLimits struct {
 	MaxRecentlyQueriedSeriesBlocks        *QueryLimit `protobuf:"bytes,1,opt,name=maxRecentlyQueriedSeriesBlocks" json:"maxRecentlyQueriedSeriesBlocks,omitempty"`
 	MaxRecentlyQueriedSeriesDiskBytesRead *QueryLimit `protobuf:"bytes,2,opt,name=maxRecentlyQueriedSeriesDiskBytesRead" json:"maxRecentlyQueriedSeriesDiskBytesRead,omitempty"`
-	MaxRecentlyQueriedSeriesDiskRead      *QueryLimit `protobuf:"bytes,3,opt,name=maxRecentlyQueriedSeriesDiskRead" json:"maxRecentlyQueriedSeriesDiskRead,omitempty"`
 }
 
 func (m *QueryLimits) Reset()         { *m = QueryLimits{} }
@@ -141,13 +140,6 @@ func (m *QueryLimits) GetMaxRecentlyQueriedSeriesDiskBytesRead() *QueryLimit {
 	return nil
 }
 
-func (m *QueryLimits) GetMaxRecentlyQueriedSeriesDiskRead() *QueryLimit {
-	if m != nil {
-		return m.MaxRecentlyQueriedSeriesDiskRead
-	}
-	return nil
-}
-
 type QueryLimit struct {
 	Limit           int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
 	LookbackSeconds int64 `protobuf:"varint,2,opt,name=lookbackSeconds,proto3" json:"lookbackSeconds,omitempty"`
@@ -297,16 +289,6 @@ func (m *QueryLimits) MarshalTo(dAtA []byte) (int, error) {
 		}
 		i += n2
 	}
-	if m.MaxRecentlyQueriedSeriesDiskRead != nil {
-		dAtA[i] = 0x1a
-		i++
-		i = encodeVarintKv(dAtA, i, uint64(m.MaxRecentlyQueriedSeriesDiskRead.Size()))
-		n3, err := m.MaxRecentlyQueriedSeriesDiskRead.MarshalTo(dAtA[i:])
-		if err != nil {
-			return 0, err
-		}
-		i += n3
-	}
 	return i, nil
 }
 
@@ -403,10 +385,6 @@ func (m *QueryLimits) Size() (n int) {
 		l = m.MaxRecentlyQueriedSeriesDiskBytesRead.Size()
 		n += 1 + l + sovKv(uint64(l))
 	}
-	if m.MaxRecentlyQueriedSeriesDiskRead != nil {
-		l = m.MaxRecentlyQueriedSeriesDiskRead.Size()
-		n += 1 + l + sovKv(uint64(l))
-	}
 	return n
 }
 
@@ -798,39 +776,6 @@ func (m *QueryLimits) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MaxRecentlyQueriedSeriesDiskRead", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthKv
-			}
-			postIndex := iNdEx + msglen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.MaxRecentlyQueriedSeriesDiskRead == nil {
-				m.MaxRecentlyQueriedSeriesDiskRead = &QueryLimit{}
-			}
-			if err := m.MaxRecentlyQueriedSeriesDiskRead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipKv(dAtA[iNdEx:])
@@ -1070,28 +1015,27 @@ func init() {
 }
 
 var fileDescriptorKv = []byte{
-	// 361 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xcf, 0x6e, 0xda, 0x30,
-	0x18, 0x5f, 0xc8, 0x86, 0xc6, 0x87, 0xb6, 0x45, 0x16, 0x9a, 0x38, 0x45, 0x28, 0xda, 0x24, 0x4e,
-	0xb1, 0x34, 0xae, 0x3b, 0xa1, 0xed, 0x54, 0x0e, 0xad, 0x51, 0xab, 0x1e, 0x7a, 0x49, 0xec, 0x0f,
-	0x1a, 0xc5, 0x89, 0x51, 0xec, 0x50, 0xf2, 0x16, 0x7d, 0x91, 0xbe, 0x47, 0x8f, 0x7d, 0x84, 0x8a,
-	0xbe, 0x48, 0x65, 0x83, 0x84, 0xa8, 0xa0, 0xf4, 0x12, 0x7d, 0xbf, 0x5f, 0x7e, 0x7f, 0xe2, 0x7c,
-	0x86, 0xbf, 0xf3, 0xcc, 0xdc, 0xd6, 0x69, 0xcc, 0x55, 0x41, 0x8b, 0x91, 0x48, 0x69, 0x31, 0xa2,
-	0xba, 0xe2, 0x94, 0xcb, 0x5a, 0x1b, 0xac, 0xe8, 0x1c, 0x4b, 0xac, 0x12, 0x83, 0x82, 0x2e, 0x2a,
-	0x65, 0x14, 0xcd, 0x97, 0x8b, 0x94, 0xe6, 0xcb, 0xd8, 0x21, 0xf2, 0xd9, 0xc2, 0xe8, 0x1c, 0xbe,
-	0x9f, 0x61, 0x73, 0x95, 0xc8, 0x1a, 0x2f, 0x17, 0x22, 0x31, 0x48, 0x02, 0xf0, 0x73, 0x6c, 0xfa,
-	0xde, 0xc0, 0x1b, 0x76, 0x98, 0x1d, 0x49, 0x0f, 0xbe, 0x2c, 0xad, 0xa0, 0xdf, 0x72, 0xdc, 0x06,
-	0x90, 0x9f, 0xd0, 0xe6, 0xaa, 0x28, 0x32, 0xd3, 0xf7, 0x07, 0xde, 0xf0, 0x2b, 0xdb, 0xa2, 0x68,
-	0x02, 0xbd, 0xfd, 0x44, 0x86, 0xba, 0x96, 0xe6, 0x40, 0x6e, 0x00, 0xbe, 0x92, 0x62, 0x9b, 0x6a,
-	0x47, 0xcb, 0x94, 0x78, 0xe7, 0x02, 0x3b, 0xcc, 0x8e, 0xd1, 0x43, 0x0b, 0xba, 0x17, 0x35, 0x56,
-	0xcd, 0x24, 0x2b, 0x32, 0xa3, 0xc9, 0x35, 0x84, 0x45, 0xb2, 0x62, 0xc8, 0xb1, 0x34, 0xb2, 0xb1,
-	0x6f, 0x32, 0x14, 0x53, 0xfb, 0xd4, 0x63, 0xa9, 0x78, 0xae, 0x5d, 0x41, 0xf7, 0x4f, 0x10, 0xdb,
-	0xe3, 0xc5, 0x3b, 0x2b, 0x3b, 0xe1, 0x23, 0x33, 0xf8, 0x7d, 0x4c, 0xf1, 0x2f, 0xd3, 0xf9, 0xb8,
-	0x31, 0xa8, 0x19, 0x26, 0x9b, 0xef, 0x3d, 0x54, 0xf0, 0x31, 0x3b, 0xb9, 0x81, 0xc1, 0x7b, 0x42,
-	0x57, 0xe1, 0x1f, 0xa9, 0x38, 0xe9, 0x8c, 0x2a, 0x80, 0x9d, 0xde, 0x6e, 0x4e, 0xda, 0xc1, 0xfd,
-	0x14, 0x9f, 0x6d, 0x00, 0x19, 0xc2, 0x0f, 0xa9, 0x54, 0x9e, 0x26, 0x3c, 0x9f, 0x22, 0x57, 0xa5,
-	0xd0, 0xee, 0x4c, 0x3e, 0x7b, 0x4b, 0x93, 0x5f, 0xf0, 0x6d, 0xa6, 0x2a, 0x8e, 0xff, 0x57, 0x1c,
-	0x51, 0xa0, 0xd8, 0xae, 0x7a, 0x9f, 0x1c, 0x07, 0x8f, 0xeb, 0xd0, 0x7b, 0x5a, 0x87, 0xde, 0xf3,
-	0x3a, 0xf4, 0xee, 0x5f, 0xc2, 0x4f, 0x69, 0xdb, 0x5d, 0xb1, 0xd1, 0x6b, 0x00, 0x00, 0x00, 0xff,
-	0xff, 0x4e, 0xb0, 0xcd, 0x62, 0xa2, 0x02, 0x00, 0x00,
+	// 348 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xcd, 0x4a, 0xf3, 0x40,
+	0x14, 0xfd, 0xf2, 0xa5, 0x96, 0xf6, 0x16, 0x35, 0x0c, 0x45, 0xba, 0x0a, 0x25, 0x28, 0x74, 0x95,
+	0x01, 0xbb, 0x75, 0x55, 0x74, 0xa3, 0x5d, 0xe8, 0x14, 0xc5, 0x6d, 0x32, 0x73, 0x5b, 0x43, 0x26,
+	0x99, 0x92, 0x99, 0xd4, 0xe6, 0x2d, 0x7c, 0x2c, 0x97, 0x6e, 0xdc, 0x4b, 0x7d, 0x11, 0x99, 0xb4,
+	0x50, 0x2a, 0x15, 0xdd, 0x0c, 0xe7, 0x9c, 0xb9, 0xe7, 0xdc, 0x3b, 0x3f, 0x70, 0x31, 0x4b, 0xcc,
+	0x53, 0x19, 0x87, 0x5c, 0x65, 0x34, 0x1b, 0x8a, 0x98, 0x66, 0x43, 0xaa, 0x0b, 0x4e, 0xb9, 0x2c,
+	0xb5, 0xc1, 0x82, 0xce, 0x30, 0xc7, 0x22, 0x32, 0x28, 0xe8, 0xbc, 0x50, 0x46, 0xd1, 0x74, 0x31,
+	0x8f, 0x69, 0xba, 0x08, 0x6b, 0x46, 0x1a, 0x96, 0x06, 0xb7, 0x70, 0x74, 0x83, 0xd5, 0x43, 0x24,
+	0x4b, 0xbc, 0x9f, 0x8b, 0xc8, 0x20, 0xf1, 0xc0, 0x4d, 0xb1, 0xea, 0x39, 0x7d, 0x67, 0xd0, 0x66,
+	0x16, 0x92, 0x2e, 0x1c, 0x2c, 0x6c, 0x41, 0xef, 0x7f, 0xad, 0xad, 0x09, 0x39, 0x81, 0x26, 0x57,
+	0x59, 0x96, 0x98, 0x9e, 0xdb, 0x77, 0x06, 0x2d, 0xb6, 0x61, 0xc1, 0x18, 0xba, 0xbb, 0x89, 0x0c,
+	0x75, 0x29, 0xcd, 0x9e, 0x5c, 0x0f, 0x5c, 0x25, 0xc5, 0x26, 0xd5, 0x42, 0xab, 0xe4, 0xf8, 0x5c,
+	0x07, 0xb6, 0x99, 0x85, 0xc1, 0xbb, 0x03, 0x9d, 0xbb, 0x12, 0x8b, 0x6a, 0x9c, 0x64, 0x89, 0xd1,
+	0xe4, 0x11, 0xfc, 0x2c, 0x5a, 0x32, 0xe4, 0x98, 0x1b, 0x59, 0xd9, 0x9d, 0x04, 0xc5, 0xc4, 0xae,
+	0x7a, 0x24, 0x15, 0x4f, 0x75, 0xdd, 0xa0, 0x73, 0xee, 0x85, 0xf6, 0x78, 0xe1, 0xd6, 0xca, 0x7e,
+	0xf1, 0x91, 0x29, 0x9c, 0xfd, 0x54, 0x71, 0x99, 0xe8, 0x74, 0x54, 0x19, 0xd4, 0x0c, 0xa3, 0xf5,
+	0xbc, 0xfb, 0x1a, 0xfc, 0xcd, 0x7e, 0xdd, 0x68, 0xb9, 0x5e, 0x23, 0x28, 0x00, 0xb6, 0x56, 0x7b,
+	0xc3, 0xd2, 0x82, 0x7a, 0x78, 0x97, 0xad, 0x09, 0x19, 0xc0, 0xb1, 0x54, 0x2a, 0x8d, 0x23, 0x9e,
+	0x4e, 0x90, 0xab, 0x5c, 0xe8, 0xba, 0xb7, 0xcb, 0xbe, 0xcb, 0xe4, 0x14, 0x0e, 0xa7, 0xaa, 0xe0,
+	0x78, 0xb5, 0xe4, 0x88, 0x02, 0xc5, 0xe6, 0x49, 0x76, 0xc5, 0x91, 0xf7, 0xba, 0xf2, 0x9d, 0xb7,
+	0x95, 0xef, 0x7c, 0xac, 0x7c, 0xe7, 0xe5, 0xd3, 0xff, 0x17, 0x37, 0xeb, 0xaf, 0x30, 0xfc, 0x0a,
+	0x00, 0x00, 0xff, 0xff, 0xbd, 0xc6, 0xa2, 0x4e, 0x4a, 0x02, 0x00, 0x00,
 }
diff --git a/src/cluster/generated/proto/kvpb/kv.proto b/src/cluster/generated/proto/kvpb/kv.proto
index ef2b2f60d5..5e5954f403 100644
--- a/src/cluster/generated/proto/kvpb/kv.proto
+++ b/src/cluster/generated/proto/kvpb/kv.proto
@@ -36,7 +36,7 @@ message KeyValueUpdateResult {
 message QueryLimits {
   QueryLimit maxRecentlyQueriedSeriesBlocks = 1;
   QueryLimit maxRecentlyQueriedSeriesDiskBytesRead = 2;
-  QueryLimit maxRecentlyQueriedSeriesDiskRead = 3;
+	reserved 3;
 }
 
 message QueryLimit {
diff --git a/src/cmd/services/m3dbnode/config/config_test.go b/src/cmd/services/m3dbnode/config/config_test.go
index 717dfbb87c..02ca66b97e 100644
--- a/src/cmd/services/m3dbnode/config/config_test.go
+++ b/src/cmd/services/m3dbnode/config/config_test.go
@@ -730,7 +730,6 @@ func TestConfiguration(t *testing.T) {
   meta_event_reporting_enabled: false
 limits:
   maxRecentlyQueriedSeriesDiskBytesRead: null
-  maxRecentlyQueriedSeriesDiskRead: null
   maxRecentlyQueriedSeriesBlocks: null
   maxOutstandingWriteRequests: 0
   maxOutstandingReadRequests: 0
diff --git a/src/cmd/services/m3dbnode/config/limits.go b/src/cmd/services/m3dbnode/config/limits.go
index eb26cb0de8..29c6dafe78 100644
--- a/src/cmd/services/m3dbnode/config/limits.go
+++ b/src/cmd/services/m3dbnode/config/limits.go
@@ -29,12 +29,6 @@ type LimitsConfiguration struct {
 	// max is surpassed encounter an error.
 	MaxRecentlyQueriedSeriesDiskBytesRead *MaxRecentQueryResourceLimitConfiguration `yaml:"maxRecentlyQueriedSeriesDiskBytesRead"`
 
-	// MaxRecentlyQueriedSeriesDiskRead sets the upper limit on time series read from disk within a given lookback
-	// period. Queries which are issued while this max is surpassed encounter an error.
-	// This is the number of time series, which is different from the number of bytes controlled by
-	// MaxRecentlyQueriedSeriesDiskBytesRead.
-	MaxRecentlyQueriedSeriesDiskRead *MaxRecentQueryResourceLimitConfiguration `yaml:"maxRecentlyQueriedSeriesDiskRead"`
-
 	// MaxRecentlyQueriedSeriesBlocks sets the upper limit on time series blocks
 	// count within a given lookback period. Queries which are issued while this
 	// max is surpassed encounter an error.
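[Editorial note, not part of the patch: after this change, `limits.NewOptions` carries only the two remaining lookback limits, docs matched and disk bytes read. A minimal hand-written sketch of wiring them up, assuming m3's module path `github.com/m3db/m3` and purely illustrative limit values:]

```go
package main

import (
	"fmt"
	"time"

	"github.com/m3db/m3/src/dbnode/storage/limits"
	"github.com/m3db/m3/src/x/instrument"
)

func main() {
	// Only the docs-matched and disk-bytes-read lookback limits remain;
	// SetDiskSeriesReadLimitOpts no longer exists after this patch.
	opts := limits.NewOptions().
		SetInstrumentOptions(instrument.NewOptions()).
		SetDocsLimitOpts(limits.LookbackLimitOptions{
			Limit:    150000,           // illustrative value
			Lookback: 15 * time.Second, // illustrative value
		}).
		SetBytesReadLimitOpts(limits.DefaultLookbackLimitOptions())

	queryLimits, err := limits.NewQueryLimits(opts)
	if err != nil {
		panic(err)
	}

	// Start the periodic lookback reset; Stop tears it down.
	queryLimits.Start()
	defer queryLimits.Stop()

	// Record matched docs against the limit, then check all limits at once.
	if err := queryLimits.DocsLimit().Inc(1000, nil); err != nil {
		fmt.Println("docs limit exceeded:", err)
	}
	if err := queryLimits.AnyExceeded(); err != nil {
		fmt.Println("a query limit is exceeded:", err)
	}
}
```

[The `nil` second argument to `Inc` is the optional query source fed to the source logger, mirroring the `Inc(len(blockReaders), nil)` call site removed from the fetch-tagged iterator below.]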
diff --git a/src/dbnode/network/server/tchannelthrift/node/service.go b/src/dbnode/network/server/tchannelthrift/node/service.go
index 5e3070edd1..08ca29c435 100644
--- a/src/dbnode/network/server/tchannelthrift/node/service.go
+++ b/src/dbnode/network/server/tchannelthrift/node/service.go
@@ -811,7 +811,6 @@ func (s *service) FetchTaggedIter(ctx context.Context, req *rpc.FetchTaggedReque
 		nsID:            ns,
 		tagEncoder:      tagEncoder,
 		iOpts:           s.opts.InstrumentOptions(),
-		blockReadLimit:  s.queryLimits.DiskSeriesReadLimit(),
 	}), nil
 }
 
@@ -856,7 +855,6 @@ type fetchTaggedResultsIter struct {
 	docReader       *docs.EncodedDocumentReader
 	tagEncoder      serialize.TagEncoder
 	iOpts           instrument.Options
-	blocksReadLimit limits.LookbackLimit
 }
 
 type fetchTaggedResultsIterOpts struct {
@@ -868,7 +866,6 @@ type fetchTaggedResultsIterOpts struct {
 	nsID           ident.ID
 	tagEncoder     serialize.TagEncoder
 	iOpts          instrument.Options
-	blockReadLimit limits.LookbackLimit
 }
 
 func newFetchTaggedResultsIter(opts fetchTaggedResultsIterOpts) FetchTaggedResultsIter { //nolint: gocritic
@@ -884,7 +881,6 @@ func newFetchTaggedResultsIter(opts fetchTaggedResultsIterOpts) FetchTaggedResul
 		docReader:       opts.docReader,
 		tagEncoder:      opts.tagEncoder,
 		iOpts:           opts.iOpts,
-		blocksReadLimit: opts.blockReadLimit,
 	}
 
 	return iter
@@ -911,7 +907,6 @@ func (i *fetchTaggedResultsIter) Next(ctx context.Context) bool {
 			docReader:       i.docReader,
 			tagEncoder:      i.tagEncoder,
 			iOpts:           i.iOpts,
-			blocksReadLimit: i.blocksReadLimit,
 		}
 		if i.fetchData {
 			// NB(r): Use a bytes ID here so that this ID doesn't need to be
@@ -941,10 +936,6 @@ func (i *fetchTaggedResultsIter) Next(ctx context.Context) bool {
 			for blockIter.Next(ctx) {
 				curr := blockIter.Current()
 				blockReaders = append(blockReaders, curr)
-				if err := i.blocksReadLimit.Inc(len(blockReaders), nil); err != nil {
-					i.err = err
-					return false
-				}
 			}
 			if blockIter.Err() != nil {
 				i.err = blockIter.Err()
@@ -988,7 +979,6 @@ type idResult struct {
 	tagEncoder       serialize.TagEncoder
 	blockReadersIter series.BlockReaderIter
 	blockReaders     [][]xio.BlockReader
-	blocksReadLimit  limits.LookbackLimit
 	iOpts            instrument.Options
 }
 
diff --git a/src/dbnode/network/server/tchannelthrift/node/service_test.go b/src/dbnode/network/server/tchannelthrift/node/service_test.go
index e0bbfecc0e..3e90711f88 100644
--- a/src/dbnode/network/server/tchannelthrift/node/service_test.go
+++ b/src/dbnode/network/server/tchannelthrift/node/service_test.go
@@ -1557,11 +1557,6 @@ func TestServiceFetchTagged(t *testing.T) {
 		{
 			name: "happy path",
 		},
-		{
-			name:            "block read limit",
-			blocksReadLimit: 1,
-			fetchErrMsg:     "query aborted due to limit",
-		},
 	}
 
 	for _, tc := range testCases {
@@ -1576,11 +1571,7 @@ func TestServiceFetchTagged(t *testing.T) {
 			queryLimits, err := limits.NewQueryLimits(limits.NewOptions().
 				SetInstrumentOptions(testTChannelThriftOptions.InstrumentOptions()).
 				SetBytesReadLimitOpts(limits.DefaultLookbackLimitOptions()).
-				SetDocsLimitOpts(limits.DefaultLookbackLimitOptions()).
-				SetDiskSeriesReadLimitOpts(limits.LookbackLimitOptions{
-					Limit:    tc.blocksReadLimit,
-					Lookback: time.Second * 1,
-				}))
+				SetDocsLimitOpts(limits.DefaultLookbackLimitOptions()))
 			require.NoError(t, err)
 
 			testTChannelThriftOptions = testTChannelThriftOptions.SetQueryLimits(queryLimits)
diff --git a/src/dbnode/server/server.go b/src/dbnode/server/server.go
index c8faabdf05..d01f3f33fe 100644
--- a/src/dbnode/server/server.go
+++ b/src/dbnode/server/server.go
@@ -451,7 +451,6 @@ func Run(runOpts RunOptions) {
 	// Setup query stats tracking.
 	docsLimit := limits.DefaultLookbackLimitOptions()
 	bytesReadLimit := limits.DefaultLookbackLimitOptions()
-	diskSeriesReadLimit := limits.DefaultLookbackLimitOptions()
 	if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesBlocks; limitConfig != nil {
 		docsLimit.Limit = limitConfig.Value
 		docsLimit.Lookback = limitConfig.Lookback
@@ -460,14 +459,9 @@ func Run(runOpts RunOptions) {
 		bytesReadLimit.Limit = limitConfig.Value
 		bytesReadLimit.Lookback = limitConfig.Lookback
 	}
-	if limitConfig := runOpts.Config.Limits.MaxRecentlyQueriedSeriesDiskRead; limitConfig != nil {
-		diskSeriesReadLimit.Limit = limitConfig.Value
-		diskSeriesReadLimit.Lookback = limitConfig.Lookback
-	}
 	limitOpts := limits.NewOptions().
 		SetDocsLimitOpts(docsLimit).
 		SetBytesReadLimitOpts(bytesReadLimit).
-		SetDiskSeriesReadLimitOpts(diskSeriesReadLimit).
 		SetInstrumentOptions(iOpts)
 	if builder := opts.SourceLoggerBuilder(); builder != nil {
 		limitOpts = limitOpts.SetSourceLoggerBuilder(builder)
@@ -1214,16 +1208,12 @@ func updateQueryLimits(logger *zap.Logger,
 		// Default to the config-based limits if unset in dynamic limits.
 		// Otherwise, use the dynamic limit.
 		docsLimitOpts           = configOpts.DocsLimitOpts()
-		diskSeriesReadLimitOpts = configOpts.DiskSeriesReadLimitOpts()
 		bytesReadLimitOpts      = configOpts.BytesReadLimitOpts()
 	)
 	if dynamicOpts != nil {
 		if dynamicOpts.MaxRecentlyQueriedSeriesBlocks != nil {
 			docsLimitOpts = dynamicLimitToLimitOpts(dynamicOpts.MaxRecentlyQueriedSeriesBlocks)
 		}
-		if dynamicOpts.MaxRecentlyQueriedSeriesDiskRead != nil {
-			diskSeriesReadLimitOpts = dynamicLimitToLimitOpts(dynamicOpts.MaxRecentlyQueriedSeriesDiskRead)
-		}
 		if dynamicOpts.MaxRecentlyQueriedSeriesDiskBytesRead != nil {
 			bytesReadLimitOpts = dynamicLimitToLimitOpts(dynamicOpts.MaxRecentlyQueriedSeriesDiskBytesRead)
 		}
@@ -1233,10 +1223,6 @@ func updateQueryLimits(logger *zap.Logger,
 		logger.Error("error updating docs limit", zap.Error(err))
 	}
 
-	if err := updateQueryLimit(queryLimits.DiskSeriesReadLimit(), diskSeriesReadLimitOpts); err != nil {
-		logger.Error("error updating series read limit", zap.Error(err))
-	}
-
 	if err := updateQueryLimit(queryLimits.BytesReadLimit(), bytesReadLimitOpts); err != nil {
 		logger.Error("error updating bytes read limit", zap.Error(err))
 	}
diff --git a/src/dbnode/storage/limits/query_limits.go b/src/dbnode/storage/limits/query_limits.go
index 5de39875a5..88594d2370 100644
--- a/src/dbnode/storage/limits/query_limits.go
+++ b/src/dbnode/storage/limits/query_limits.go
@@ -41,7 +41,6 @@ const (
 type queryLimits struct {
 	docsLimit           *lookbackLimit
 	bytesReadLimit      *lookbackLimit
-	seriesDiskReadLimit *lookbackLimit
 }
 
 type lookbackLimit struct {
@@ -90,21 +89,17 @@ func NewQueryLimits(options Options) (QueryLimits, error) {
 		iOpts                   = options.InstrumentOptions()
 		docsLimitOpts           = options.DocsLimitOpts()
 		bytesReadLimitOpts      = options.BytesReadLimitOpts()
-		diskSeriesReadLimitOpts = options.DiskSeriesReadLimitOpts()
 		sourceLoggerBuilder     = options.SourceLoggerBuilder()
 
 		docsLimit = newLookbackLimit(
 			iOpts, docsLimitOpts, "docs-matched", sourceLoggerBuilder)
 		bytesReadLimit = newLookbackLimit(
 			iOpts, bytesReadLimitOpts, "disk-bytes-read", sourceLoggerBuilder)
-		seriesDiskReadLimit = newLookbackLimit(
-			iOpts, diskSeriesReadLimitOpts, "disk-series-read", sourceLoggerBuilder)
 	)
 
 	return &queryLimits{
 		docsLimit:           docsLimit,
 		bytesReadLimit:      bytesReadLimit,
-		seriesDiskReadLimit: seriesDiskReadLimit,
 	}, nil
 }
 
@@ -157,15 +152,10 @@ func (q *queryLimits) BytesReadLimit() LookbackLimit {
 	return q.bytesReadLimit
 }
 
-func (q *queryLimits) DiskSeriesReadLimit() LookbackLimit {
-	return q.seriesDiskReadLimit
-}
-
 func (q *queryLimits) Start() {
 	// Lock on explicit start to avoid any collision with asynchronous updating
 	// which will call stop/start if the lookback has changed.
 	q.docsLimit.startWithLock()
-	q.seriesDiskReadLimit.startWithLock()
 	q.bytesReadLimit.startWithLock()
 }
 
@@ -173,7 +163,6 @@ func (q *queryLimits) Stop() {
 	// Lock on explicit stop to avoid any collision with asynchronous updating
 	// which will call stop/start if the lookback has changed.
 	q.docsLimit.stopWithLock()
-	q.seriesDiskReadLimit.stopWithLock()
 	q.bytesReadLimit.stopWithLock()
 }
 
@@ -181,9 +170,6 @@ func (q *queryLimits) AnyExceeded() error {
 	if err := q.docsLimit.exceeded(); err != nil {
 		return err
 	}
-	if err := q.seriesDiskReadLimit.exceeded(); err != nil {
-		return err
-	}
 	return q.bytesReadLimit.exceeded()
 }
 
diff --git a/src/dbnode/storage/limits/query_limits_test.go b/src/dbnode/storage/limits/query_limits_test.go
index 6ce32708a9..37833dab92 100644
--- a/src/dbnode/storage/limits/query_limits_test.go
+++ b/src/dbnode/storage/limits/query_limits_test.go
@@ -37,13 +37,11 @@ import (
 func testQueryLimitOptions(
 	docOpts LookbackLimitOptions,
 	bytesOpts LookbackLimitOptions,
-	seriesOpts LookbackLimitOptions,
 	iOpts instrument.Options,
 ) Options {
 	return NewOptions().
 		SetDocsLimitOpts(docOpts).
 		SetBytesReadLimitOpts(bytesOpts).
-		SetDiskSeriesReadLimitOpts(seriesOpts).
 		SetInstrumentOptions(iOpts)
 }
 
@@ -57,11 +55,7 @@ func TestQueryLimits(t *testing.T) {
 		Limit:    l,
 		Lookback: time.Second,
 	}
-	seriesOpts := LookbackLimitOptions{
-		Limit:    l,
-		Lookback: time.Second,
-	}
-	opts := testQueryLimitOptions(docOpts, bytesOpts, seriesOpts, instrument.NewOptions())
+	opts := testQueryLimitOptions(docOpts, bytesOpts, instrument.NewOptions())
 	queryLimits, err := NewQueryLimits(opts)
 	require.NoError(t, err)
 	require.NotNil(t, queryLimits)
@@ -76,7 +70,7 @@ func TestQueryLimits(t *testing.T) {
 	require.True(t, xerrors.IsInvalidParams(err))
 	require.True(t, IsQueryLimitExceededError(err))
 
-	opts = testQueryLimitOptions(docOpts, bytesOpts, seriesOpts, instrument.NewOptions())
+	opts = testQueryLimitOptions(docOpts, bytesOpts, instrument.NewOptions())
 	queryLimits, err = NewQueryLimits(opts)
 	require.NoError(t, err)
 	require.NotNil(t, queryLimits)
@@ -91,22 +85,6 @@ func TestQueryLimits(t *testing.T) {
 	require.Error(t, err)
 	require.True(t, xerrors.IsInvalidParams(err))
 	require.True(t, IsQueryLimitExceededError(err))
-
-	opts = testQueryLimitOptions(docOpts, bytesOpts, seriesOpts, instrument.NewOptions())
-	queryLimits, err = NewQueryLimits(opts)
-	require.NoError(t, err)
-	require.NotNil(t, queryLimits)
-
-	// No error yet.
-	err = queryLimits.AnyExceeded()
-	require.NoError(t, err)
-
-	// Limit from bytes.
-	require.Error(t, queryLimits.DiskSeriesReadLimit().Inc(2, nil))
-	err = queryLimits.AnyExceeded()
-	require.Error(t, err)
-	require.True(t, xerrors.IsInvalidParams(err))
-	require.True(t, IsQueryLimitExceededError(err))
 }
 
 func TestLookbackLimit(t *testing.T) {
@@ -354,7 +332,7 @@ func TestSourceLogger(t *testing.T) {
 		}
 
 		builder = &testBuilder{records: []testLoggerRecord{}}
-		opts    = testQueryLimitOptions(noLimit, noLimit, noLimit, iOpts).
+		opts    = testQueryLimitOptions(noLimit, noLimit, iOpts).
 			SetSourceLoggerBuilder(builder)
 	)
diff --git a/src/dbnode/storage/limits/types.go b/src/dbnode/storage/limits/types.go
index 22b46b4b57..e944305cf1 100644
--- a/src/dbnode/storage/limits/types.go
+++ b/src/dbnode/storage/limits/types.go
@@ -39,8 +39,6 @@ type QueryLimits interface {
 	DocsLimit() LookbackLimit
 	// BytesReadLimit limits queries by a global concurrent count of bytes read from disk.
 	BytesReadLimit() LookbackLimit
-	// DiskSeriesReadLimit limits queries by a global concurrent count of time series read from disk.
-	DiskSeriesReadLimit() LookbackLimit
 	// AnyExceeded returns an error if any of the query limits are exceeded.
 	AnyExceeded() error

From bb0b3f3a801f3dc2e7541904a88231eac63092fc Mon Sep 17 00:00:00 2001
From: Ryan Hall
Date: Thu, 4 Feb 2021 17:31:14 -0800
Subject: [PATCH 2/3] space fix

---
 src/cluster/generated/proto/kvpb/kv.proto | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/cluster/generated/proto/kvpb/kv.proto b/src/cluster/generated/proto/kvpb/kv.proto
index 5e5954f403..d2bc1c00df 100644
--- a/src/cluster/generated/proto/kvpb/kv.proto
+++ b/src/cluster/generated/proto/kvpb/kv.proto
@@ -36,7 +36,7 @@ message KeyValueUpdateResult {
 message QueryLimits {
   QueryLimit maxRecentlyQueriedSeriesBlocks = 1;
   QueryLimit maxRecentlyQueriedSeriesDiskBytesRead = 2;
-	reserved 3;
+  reserved 3;
 }
 
 message QueryLimit {

From b1dbca85a3b3d2573cba72a24ef205b74db586c2 Mon Sep 17 00:00:00 2001
From: Ryan Hall
Date: Thu, 4 Feb 2021 20:24:31 -0800
Subject: [PATCH 3/3] fix test

---
 .../api/v1/handler/database/kvstore_test.go | 17 -----------------
 1 file changed, 17 deletions(-)

diff --git a/src/query/api/v1/handler/database/kvstore_test.go b/src/query/api/v1/handler/database/kvstore_test.go
index 7232815de2..b4a700ec94 100644
--- a/src/query/api/v1/handler/database/kvstore_test.go
+++ b/src/query/api/v1/handler/database/kvstore_test.go
@@ -89,11 +89,6 @@ func TestUpdateQueryLimits(t *testing.T) {
 					LookbackSeconds: 15,
 					ForceExceeded:   true,
 				},
-				MaxRecentlyQueriedSeriesDiskRead: &kvpb.QueryLimit{
-					Limit:           1,
-					LookbackSeconds: 15,
-					ForceExceeded:   true,
-				},
 			},
 			commit: true,
 		},
@@ -110,11 +105,6 @@ func TestUpdateQueryLimits(t *testing.T) {
 					LookbackSeconds: 15,
 					ForceExceeded:   true,
 				},
-				MaxRecentlyQueriedSeriesDiskRead: &kvpb.QueryLimit{
-					Limit:           1,
-					LookbackSeconds: 15,
-					ForceExceeded:   true,
-				},
 			},
 			commit: false,
 		},
@@ -153,17 +143,11 @@ func TestUpdateQueryLimits(t *testing.T) {
 					LookbackSeconds: 30,
 					ForceExceeded:   false,
 				},
-				MaxRecentlyQueriedSeriesDiskRead: &kvpb.QueryLimit{
-					Limit:           100,
-					LookbackSeconds: 300,
-					ForceExceeded:   false,
-				},
 			}
 			mockVal := kv.NewMockValue(ctrl)
 			storeMock.EXPECT().Get(kvconfig.QueryLimits).Return(mockVal, nil)
 			mockVal.EXPECT().Unmarshal(gomock.Any()).DoAndReturn(func(v *kvpb.QueryLimits) error {
 				v.MaxRecentlyQueriedSeriesBlocks = oldLimits.MaxRecentlyQueriedSeriesBlocks
-				v.MaxRecentlyQueriedSeriesDiskRead = oldLimits.MaxRecentlyQueriedSeriesDiskRead
 				return nil
 			})
 			if test.commit {
@@ -181,7 +165,6 @@ func TestUpdateQueryLimits(t *testing.T) {
 			require.Equal(t, kvconfig.QueryLimits, r.Key)
 			require.Equal(t, *oldLimits.MaxRecentlyQueriedSeriesBlocks, *oldResult.MaxRecentlyQueriedSeriesBlocks)
 			require.Nil(t, oldResult.MaxRecentlyQueriedSeriesDiskBytesRead)
-			require.Equal(t, *oldLimits.MaxRecentlyQueriedSeriesDiskRead, *oldResult.MaxRecentlyQueriedSeriesDiskRead)
 			require.Equal(t, json.RawMessage(limitJSON), r.New)
 			require.Equal(t, 0, r.Version)
 		}
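[Editorial note, not part of the patches: because field 3 is now `reserved`, the regenerated `Unmarshal` routes any stale `maxRecentlyQueriedSeriesDiskRead` bytes through the `default:`/`skipKv` branch instead of decoding them, so previously stored payloads remain readable. A hand-written round-trip sketch using the regenerated gogo/protobuf types, with purely illustrative values:]

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/cluster/generated/proto/kvpb"
)

func main() {
	// Only the two surviving dynamic limits can be expressed; field 3 is
	// reserved and has no corresponding Go field anymore.
	update := &kvpb.QueryLimits{
		MaxRecentlyQueriedSeriesBlocks: &kvpb.QueryLimit{
			Limit:           150000, // illustrative value
			LookbackSeconds: 15,
			ForceExceeded:   false,
		},
		MaxRecentlyQueriedSeriesDiskBytesRead: &kvpb.QueryLimit{
			Limit:           0, // illustrative value
			LookbackSeconds: 15,
			ForceExceeded:   false,
		},
	}

	// Round-trip through the generated marshalling code.
	data, err := update.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded kvpb.QueryLimits
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetMaxRecentlyQueriedSeriesBlocks().Limit)
}
```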