diff --git a/CHANGELOG.md b/CHANGELOG.md index c3c8ed1b4d7..eb7fb378dee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,7 @@ Here is an overview of all new **experimental** features: ### Improvements - **Cassandra Scaler**: Add TLS support for cassandra scaler ([#5802](https://github.com/kedacore/keda/issues/5802)) +- **GCP Pub/Sub**: Add optional valueIfNull to allow a default scaling value and prevent errors when the GCP metric returns no value. ([#5896](https://github.com/kedacore/keda/issues/5896)) - **GCP Scalers**: Added custom time horizon in GCP scalers ([#5778](https://github.com/kedacore/keda/issues/5778)) - **GitHub Scaler**: Fixed pagination, fetching repository list ([#5738](https://github.com/kedacore/keda/issues/5738)) - **Kafka**: Fix logic to scale to zero on invalid offset even with earliest offsetResetPolicy ([#5689](https://github.com/kedacore/keda/issues/5689)) diff --git a/pkg/scalers/gcp/gcp_stackdriver_client.go b/pkg/scalers/gcp/gcp_stackdriver_client.go index 7384f035b59..87893a3979e 100644 --- a/pkg/scalers/gcp/gcp_stackdriver_client.go +++ b/pkg/scalers/gcp/gcp_stackdriver_client.go @@ -288,7 +288,7 @@ func (s StackDriverClient) GetMetrics( // // MQL provides a more expressive query language than // the current filtering options of GetMetrics -func (s StackDriverClient) QueryMetrics(ctx context.Context, projectID, query string) (float64, error) { +func (s StackDriverClient) QueryMetrics(ctx context.Context, projectID, query string, valueIfNull *float64) (float64, error) { req := &monitoringpb.QueryTimeSeriesRequest{ Query: query, PageSize: 1, @@ -303,7 +303,10 @@ func (s StackDriverClient) QueryMetrics(ctx context.Context, projectID, query st resp, err := it.Next() if err == iterator.Done { - return value, fmt.Errorf("could not find stackdriver metric with query %s", req.Query) + if valueIfNull == nil { + return value, fmt.Errorf("could not find stackdriver metric with query %s", req.Query) + } + return *valueIfNull, nil } if err 
!= nil { diff --git a/pkg/scalers/gcp_pubsub_scaler.go b/pkg/scalers/gcp_pubsub_scaler.go index dd9b9873ebc..4d4534a3305 100644 --- a/pkg/scalers/gcp_pubsub_scaler.go +++ b/pkg/scalers/gcp_pubsub_scaler.go @@ -49,6 +49,7 @@ type pubsubMetadata struct { triggerIndex int aggregation string timeHorizon string + valueIfNull *float64 } // NewPubSubScaler creates a new pubsubScaler @@ -177,6 +178,14 @@ func parsePubSubMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) } meta.value = triggerValue } + + if val, ok := config.TriggerMetadata["valueIfNull"]; ok && val != "" { + valueIfNull, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, fmt.Errorf("valueIfNull parsing error %w", err) + } + meta.valueIfNull = &valueIfNull + } } meta.aggregation = config.TriggerMetadata["aggregation"] @@ -291,7 +300,7 @@ func (s *pubsubScaler) getMetrics(ctx context.Context, metricType string) (float // Pubsub metrics are collected every 60 seconds so no need to aggregate them. // See: https://cloud.google.com/monitoring/api/metrics_gcp#gcp-pubsub - return s.client.QueryMetrics(ctx, projectID, query) + return s.client.QueryMetrics(ctx, projectID, query, s.metadata.valueIfNull) } func getResourceData(s *pubsubScaler) (string, string) {