Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add resource_exhausted_cause metrics tag #2423

Merged
merged 5 commits into from
Jan 28, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion common/archiver/filestore/historyArchiver_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ import (
"testing"
"time"

enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/server/tests/testhelper"

"github.com/golang/mock/gomock"
Expand Down Expand Up @@ -188,7 +189,7 @@ func (s *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() {
historyIterator := archiver.NewMockHistoryIterator(mockCtrl)
gomock.InOrder(
historyIterator.EXPECT().HasNext().Return(true),
historyIterator.EXPECT().Next().Return(nil, serviceerror.NewResourceExhausted("")),
historyIterator.EXPECT().Next().Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")),
)

historyArchiver := s.newTestHistoryArchiver(historyIterator)
Expand Down
3 changes: 2 additions & 1 deletion common/archiver/gcloud/historyArchiver_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
enumspb "go.temporal.io/api/enums/v1"
historypb "go.temporal.io/api/history/v1"
"go.temporal.io/api/serviceerror"

Expand Down Expand Up @@ -221,7 +222,7 @@ func (h *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() {
historyIterator := archiver.NewMockHistoryIterator(h.controller)
gomock.InOrder(
historyIterator.EXPECT().HasNext().Return(true),
historyIterator.EXPECT().Next().Return(nil, serviceerror.NewResourceExhausted("")),
historyIterator.EXPECT().Next().Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")),
)

historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper)
Expand Down
3 changes: 2 additions & 1 deletion common/archiver/s3store/historyArchiver_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/uber-go/tally/v4"
enumspb "go.temporal.io/api/enums/v1"
historypb "go.temporal.io/api/history/v1"
"go.temporal.io/api/serviceerror"

Expand Down Expand Up @@ -327,7 +328,7 @@ func (s *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() {
historyIterator := archiver.NewMockHistoryIterator(mockCtrl)
gomock.InOrder(
historyIterator.EXPECT().HasNext().Return(true),
historyIterator.EXPECT().Next().Return(nil, serviceerror.NewResourceExhausted("")),
historyIterator.EXPECT().Next().Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")),
)

historyArchiver := s.newTestHistoryArchiver(historyIterator)
Expand Down
1 change: 1 addition & 0 deletions common/metrics/defs.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ const (
QueueTypeTagName = "queue_type"
visibilityTypeTagName = "visibility_type"
httpStatusTagName = "http_status"
resourceExhaustedTag = "resource_exhausted_cause"
)

// This package should hold all the metrics and tags for temporal
Expand Down
6 changes: 6 additions & 0 deletions common/metrics/tags.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ package metrics

import (
"strconv"

enumspb "go.temporal.io/api/enums/v1"
)

const (
Expand Down Expand Up @@ -212,3 +214,7 @@ func AdvancedVisibilityTypeTag() Tag {
// HttpStatusTag returns a metrics tag recording an HTTP status code
// under the "http_status" key, formatted as its decimal string form.
func HttpStatusTag(value int) Tag {
return &tagImpl{key: httpStatusTagName, value: strconv.Itoa(value)}
}

// ResourceExhaustedCauseTag returns a metrics tag recording the cause of a
// ResourceExhausted error (e.g. RPS limit, concurrent limit, system overload)
// under the "resource_exhausted_cause" key, using the enum's string name.
func ResourceExhaustedCauseTag(cause enumspb.ResourceExhaustedCause) Tag {
return &tagImpl{key: resourceExhaustedTag, value: cause.String()}
}
4 changes: 3 additions & 1 deletion common/persistence/client/fault_injection.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import (
"fmt"

commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"

"go.temporal.io/server/common/config"
Expand Down Expand Up @@ -95,7 +96,8 @@ var defaultErrors = []FaultWeight{
},
{
errFactory: func(msg string) error {
return serviceerror.NewResourceExhausted(fmt.Sprintf("serviceerror.NewResourceExhausted: %s", msg))
return serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED,
fmt.Sprintf("serviceerror.NewResourceExhausted: %s", msg))
},
weight: 1,
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ import (
"fmt"

"github.com/gocql/gocql"
enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"

"go.temporal.io/server/common/persistence"
Expand All @@ -52,8 +53,9 @@ func ConvertError(
case *gocql.RequestErrWriteTimeout:
return &persistence.TimeoutError{Msg: fmt.Sprintf("operation %v encountered %v", operation, err.Error())}
case gocql.RequestError:
if v.Code() == 0x1001 {
return serviceerror.NewResourceExhausted(fmt.Sprintf("operation %v encountered %v", operation, err.Error()))
if v.Code() == gocql.ErrCodeOverloaded {
return serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED,
fmt.Sprintf("operation %v encountered %v", operation, err.Error()))
}
return serviceerror.NewUnavailable(fmt.Sprintf("operation %v encountered %v", operation, err.Error()))
default:
Expand Down
3 changes: 2 additions & 1 deletion common/persistence/persistenceRateLimitedClients.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ package persistence

import (
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"

"go.temporal.io/server/common/log"
Expand All @@ -34,7 +35,7 @@ import (

var (
// ErrPersistenceLimitExceeded is the error indicating QPS limit reached.
ErrPersistenceLimitExceeded = serviceerror.NewResourceExhausted("Persistence Max QPS Reached.")
ErrPersistenceLimitExceeded = serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED, "Persistence Max QPS Reached.")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

should this be RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT?

)

type (
Expand Down
4 changes: 3 additions & 1 deletion common/persistence/visibility/visiblity_manager_metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
package visibility

import (
enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
Expand Down Expand Up @@ -180,7 +181,8 @@ func (m *visibilityManagerMetrics) updateErrorMetric(scope metrics.Scope, err er
scope.IncCounter(metrics.VisibilityPersistenceTimeout)
scope.IncCounter(metrics.VisibilityPersistenceFailures)
case *serviceerror.ResourceExhausted:
scope.IncCounter(metrics.VisibilityPersistenceResourceExhausted)
scope.Tagged(metrics.ResourceExhaustedCauseTag(enumspb.RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED)).
IncCounter(metrics.VisibilityPersistenceResourceExhausted)
scope.IncCounter(metrics.VisibilityPersistenceFailures)
case *serviceerror.Internal:
scope.IncCounter(metrics.VisibilityPersistenceInternal)
Expand Down
3 changes: 2 additions & 1 deletion common/rpc/interceptor/namespace_count_limit.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ import (
"sync"
"sync/atomic"

enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"
"google.golang.org/grpc"

Expand All @@ -38,7 +39,7 @@ import (
)

var (
ErrNamespaceCountLimitServerBusy = serviceerror.NewResourceExhausted("namespace count limit exceeded")
ErrNamespaceCountLimitServerBusy = serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_CONCURRENT_LIMIT, "namespace concurrent poller limit exceeded")
)

type (
Expand Down
3 changes: 2 additions & 1 deletion common/rpc/interceptor/namespace_rate_limit.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import (
"context"
"time"

enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"
"google.golang.org/grpc"

Expand All @@ -40,7 +41,7 @@ const (
)

var (
ErrNamespaceRateLimitServerBusy = serviceerror.NewResourceExhausted("namespace rate limit exceeded")
ErrNamespaceRateLimitServerBusy = serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "namespace rate limit exceeded")
)

type (
Expand Down
3 changes: 2 additions & 1 deletion common/rpc/interceptor/rate_limit.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import (
"context"
"time"

enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"
"google.golang.org/grpc"

Expand All @@ -39,7 +40,7 @@ const (
)

var (
RateLimitServerBusy = serviceerror.NewResourceExhausted("service rate limit exceeded")
RateLimitServerBusy = serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "service rate limit exceeded")
)

type (
Expand Down
2 changes: 1 addition & 1 deletion common/rpc/interceptor/telemetry.go
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ func (ti *TelemetryInterceptor) handleError(
case *serviceerror.NotFound:
scope.IncCounter(metrics.ServiceErrNotFoundCounter)
case *serviceerror.ResourceExhausted:
scope.IncCounter(metrics.ServiceErrResourceExhaustedCounter)
scope.Tagged(metrics.ResourceExhaustedCauseTag(err.Cause)).IncCounter(metrics.ServiceErrResourceExhaustedCounter)
case *serviceerrors.RetryReplication:
scope.IncCounter(metrics.ServiceErrRetryTaskCounter)
case *serviceerror.NamespaceAlreadyExists:
Expand Down
4 changes: 2 additions & 2 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ require (
go.opentelemetry.io/otel/sdk v1.2.0
go.opentelemetry.io/otel/sdk/export/metric v0.25.0
go.opentelemetry.io/otel/sdk/metric v0.25.0
go.temporal.io/api v1.7.1-0.20220126215723-f2aa2e2ad71d
go.temporal.io/api v1.7.1-0.20220127213442-107e361dceeb
go.temporal.io/sdk v1.12.0
go.temporal.io/version v0.3.0
go.uber.org/atomic v1.9.0
Expand Down Expand Up @@ -90,7 +90,7 @@ require (
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel/internal/metric v0.25.0 // indirect
go.opentelemetry.io/otel/trace v1.2.0 // indirect
golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba // indirect
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
Expand Down
8 changes: 4 additions & 4 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -461,8 +461,8 @@ go.opentelemetry.io/otel/trace v1.2.0 h1:Ys3iqbqZhcf28hHzrm5WAquMkDHNZTUkw7KHbuN
go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.temporal.io/api v1.5.0/go.mod h1:BqKxEJJYdxb5dqf0ODfzfMxh8UEQ5L3zKS51FiIYYkA=
go.temporal.io/api v1.7.1-0.20220126215723-f2aa2e2ad71d h1:uqiiUinsGlZ7R6M74B8Md2XXbjkoinQdhbFrY7coh1o=
go.temporal.io/api v1.7.1-0.20220126215723-f2aa2e2ad71d/go.mod h1:Qy3l0Bw9C1RcToB+kfsI0lkrZYbDLgC9pzi6OYYJ/aE=
go.temporal.io/api v1.7.1-0.20220127213442-107e361dceeb h1:snXrLlZzZxtPBAdEg4h29IJzjvsZNbMHTsQL+MK2TCU=
go.temporal.io/api v1.7.1-0.20220127213442-107e361dceeb/go.mod h1:sKL6Cu/CbleQ+tjl2/GnMAIk/yvVbOnpgzAlV6WyEso=
go.temporal.io/sdk v1.12.0 h1:QkqOpmgXVnHHCFP9HbSbyrF3jYgLBKY/3NdZyR7e5nQ=
go.temporal.io/sdk v1.12.0/go.mod h1:lSp3lH1lI0TyOsus0arnO3FYvjVXBZGi/G7DjnAnm6o=
go.temporal.io/version v0.3.0 h1:dMrei9l9NyHt8nG6EB8vAwDLLTwx2SvRyucCSumAiig=
Expand Down Expand Up @@ -580,8 +580,8 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba h1:6u6sik+bn/y7vILcYkK3iwTBWN7WtBvB0+SZswQnbf8=
golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
Expand Down
7 changes: 3 additions & 4 deletions host/sizelimit_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -197,10 +197,9 @@ SignalLoop:
}
}
// Signalling workflow should result in force terminating the workflow execution and returns with ResourceExhausted
// error. ResourceExhausted is retried by the client and eventually results in NotFound error returned back to the
// caller as workflow execution is no longer running.
s.EqualError(signalErr, "workflow execution already completed")
s.IsType(&serviceerror.NotFound{}, signalErr)
// error. InvalidArgument is returned by the client.
s.EqualError(signalErr, "Workflow history size / count exceeds limit.")
s.IsType(&serviceerror.InvalidArgument{}, signalErr)

s.printWorkflowHistory(s.namespace, &commonpb.WorkflowExecution{
WorkflowId: id,
Expand Down
4 changes: 2 additions & 2 deletions service/frontend/adminHandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -1704,7 +1704,7 @@ func (adh *AdminHandler) startRequestProfile(scope int) (metrics.Scope, metrics.
}

func (adh *AdminHandler) error(err error, scope metrics.Scope) error {
switch err.(type) {
switch err := err.(type) {
case *serviceerror.Unavailable:
adh.logger.Error("unavailable error", tag.Error(err))
scope.IncCounter(metrics.ServiceFailures)
Expand All @@ -1713,7 +1713,7 @@ func (adh *AdminHandler) error(err error, scope metrics.Scope) error {
scope.IncCounter(metrics.ServiceErrInvalidArgumentCounter)
return err
case *serviceerror.ResourceExhausted:
scope.IncCounter(metrics.ServiceErrResourceExhaustedCounter)
scope.Tagged(metrics.ResourceExhaustedCauseTag(err.Cause)).IncCounter(metrics.ServiceErrResourceExhaustedCounter)
return err
case *serviceerror.NotFound:
return err
Expand Down
4 changes: 2 additions & 2 deletions service/history/consts/const.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ var (
// ErrDeserializingToken is the error to indicate task token is invalid
ErrDeserializingToken = serviceerror.NewInvalidArgument("error deserializing task token")
// ErrSignalsLimitExceeded is the error indicating limit reached for maximum number of signal events
ErrSignalsLimitExceeded = serviceerror.NewResourceExhausted("exceeded workflow execution limit for signal events")
ErrSignalsLimitExceeded = serviceerror.NewInvalidArgument("exceeded workflow execution limit for signal events")
// ErrEventsAterWorkflowFinish is the error indicating server error trying to write events after workflow finish event
ErrEventsAterWorkflowFinish = serviceerror.NewInternal("error validating last event being workflow finish event")
// ErrQueryEnteredInvalidState is error indicating query entered invalid state
Expand All @@ -73,7 +73,7 @@ var (
// ErrEmptyHistoryRawEventBatch indicate that one single batch of history raw events is of size 0
ErrEmptyHistoryRawEventBatch = serviceerror.NewInvalidArgument("encountered empty history batch")
// ErrSizeExceedsLimit is error indicating workflow execution has exceeded system defined limit
ErrSizeExceedsLimit = serviceerror.NewResourceExhausted(common.FailureReasonSizeExceedsLimit)
ErrSizeExceedsLimit = serviceerror.NewInvalidArgument(common.FailureReasonSizeExceedsLimit)
// ErrUnknownCluster is error indicating unknown cluster
ErrUnknownCluster = serviceerror.NewInvalidArgument("unknown cluster")
// ErrBufferedQueryCleared is error indicating mutable state is cleared while buffered query is pending
Expand Down
37 changes: 19 additions & 18 deletions service/history/replicationTaskProcessor.go
Original file line number Diff line number Diff line change
Expand Up @@ -514,38 +514,39 @@ func (p *ReplicationTaskProcessorImpl) cleanupReplicationTasks() error {
}

func (p *ReplicationTaskProcessorImpl) emitTaskMetrics(scope int, err error) {
metricsScope := p.metricsClient.Scope(scope)
if common.IsContextDeadlineExceededErr(err) || common.IsContextCanceledErr(err) {
p.metricsClient.IncCounter(scope, metrics.ServiceErrContextTimeoutCounter)
metricsScope.IncCounter(metrics.ServiceErrContextTimeoutCounter)
return
}

// Also update counter to distinguish between type of failures
switch err.(type) {
switch err := err.(type) {
case nil:
p.metricsClient.IncCounter(scope, metrics.ReplicationTasksApplied)
metricsScope.IncCounter(metrics.ReplicationTasksApplied)
case *serviceerrors.ShardOwnershipLost:
p.metricsClient.IncCounter(scope, metrics.ServiceErrShardOwnershipLostCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicationTasksFailed)
metricsScope.IncCounter(metrics.ServiceErrShardOwnershipLostCounter)
metricsScope.IncCounter(metrics.ReplicationTasksFailed)
case *serviceerror.InvalidArgument:
p.metricsClient.IncCounter(scope, metrics.ServiceErrInvalidArgumentCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicationTasksFailed)
metricsScope.IncCounter(metrics.ServiceErrInvalidArgumentCounter)
metricsScope.IncCounter(metrics.ReplicationTasksFailed)
case *serviceerror.NamespaceNotActive:
p.metricsClient.IncCounter(scope, metrics.ServiceErrNamespaceNotActiveCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicationTasksFailed)
metricsScope.IncCounter(metrics.ServiceErrNamespaceNotActiveCounter)
metricsScope.IncCounter(metrics.ReplicationTasksFailed)
case *serviceerror.WorkflowExecutionAlreadyStarted:
p.metricsClient.IncCounter(scope, metrics.ServiceErrExecutionAlreadyStartedCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicationTasksFailed)
metricsScope.IncCounter(metrics.ServiceErrExecutionAlreadyStartedCounter)
metricsScope.IncCounter(metrics.ReplicationTasksFailed)
case *serviceerror.NotFound:
p.metricsClient.IncCounter(scope, metrics.ServiceErrNotFoundCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicationTasksFailed)
metricsScope.IncCounter(metrics.ServiceErrNotFoundCounter)
metricsScope.IncCounter(metrics.ReplicationTasksFailed)
case *serviceerror.ResourceExhausted:
p.metricsClient.IncCounter(scope, metrics.ServiceErrResourceExhaustedCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicationTasksFailed)
metricsScope.Tagged(metrics.ResourceExhaustedCauseTag(err.Cause)).IncCounter(metrics.ServiceErrResourceExhaustedCounter)
metricsScope.IncCounter(metrics.ReplicationTasksFailed)
case *serviceerrors.RetryReplication:
p.metricsClient.IncCounter(scope, metrics.ServiceErrRetryTaskCounter)
p.metricsClient.IncCounter(scope, metrics.ReplicationTasksFailed)
metricsScope.IncCounter(metrics.ServiceErrRetryTaskCounter)
metricsScope.IncCounter(metrics.ReplicationTasksFailed)
default:
p.metricsClient.IncCounter(scope, metrics.ReplicationTasksFailed)
metricsScope.IncCounter(metrics.ReplicationTasksFailed)
}
}

Expand Down
2 changes: 0 additions & 2 deletions service/matching/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,8 +64,6 @@ const (

var (
_ matchingservice.MatchingServiceServer = (*Handler)(nil)

errMatchingHostThrottle = serviceerror.NewResourceExhausted("Matching host RPS exceeded.")
)

// NewHandler creates a gRPC handler for the matchingservice
Expand Down
Loading