From 66de066967d5f62f212d83d4566f0eb8d24666f5 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Sun, 31 Dec 2023 13:10:56 +0100 Subject: [PATCH] Add new AWS podIdentity Signed-off-by: Jorge Turrado --- .github/workflows/pr-e2e.yml | 4 +- .github/workflows/template-main-e2e-test.yml | 2 +- .github/workflows/template-smoke-tests.yml | 2 +- .gitignore | 3 + CHANGELOG.md | 10 +- .../v1alpha1/zz_generated.deepcopy.go | 1 - apis/keda/v1alpha1/scaledobject_types.go | 18 +- apis/keda/v1alpha1/scaledobject_webhook.go | 64 +++- .../v1alpha1/scaledobject_webhook_test.go | 29 ++ .../v1alpha1/triggerauthentication_types.go | 17 +- .../v1alpha1/triggerauthentication_webhook.go | 9 + .../triggerauthentication_webhook_test.go | 160 ++++++++- apis/keda/v1alpha1/zz_generated.deepcopy.go | 21 +- ...keda.sh_clustertriggerauthentications.yaml | 38 ++ .../bases/keda.sh_triggerauthentications.yaml | 38 ++ controllers/keda/scaledjob_controller.go | 12 +- controllers/keda/scaledjob_controller_test.go | 181 ++++++++++ controllers/keda/scaledobject_controller.go | 10 +- .../keda/scaledobject_controller_test.go | 118 +++++++ controllers/keda/suite_test.go | 7 + pkg/eventemitter/eventemitter.go | 24 ++ pkg/metricscollector/metricscollectors.go | 30 ++ pkg/metricscollector/opentelemetry.go | 57 +++ pkg/metricscollector/prommetrics.go | 41 +++ pkg/metricsservice/api/metrics.pb.go | 2 +- pkg/metricsservice/api/metrics_grpc.pb.go | 2 +- pkg/scalers/apache_kafka_scaler.go | 7 +- pkg/scalers/aws/aws_authorization.go | 17 + pkg/scalers/aws/aws_common.go | 113 ++++++ pkg/scalers/aws/aws_config_cache.go | 119 +++++++ pkg/scalers/aws/aws_config_cache_test.go | 91 +++++ pkg/scalers/aws_cloudwatch_scaler.go | 9 +- pkg/scalers/aws_cloudwatch_scaler_test.go | 12 +- pkg/scalers/aws_common.go | 101 ------ pkg/scalers/aws_dynamodb_scaler.go | 8 +- pkg/scalers/aws_dynamodb_scaler_test.go | 36 +- pkg/scalers/aws_dynamodb_streams_scaler.go | 8 +- .../aws_dynamodb_streams_scaler_test.go | 54 +-- pkg/scalers/aws_kinesis_stream_scaler.go | 8 +- pkg/scalers/aws_kinesis_stream_scaler_test.go | 54 +-- pkg/scalers/aws_sqs_queue_scaler.go | 8 +- .../externalscaler/externalscaler.pb.go | 2 +- .../externalscaler/externalscaler_grpc.pb.go | 2 +- pkg/scalers/liiklus/LiiklusService.pb.go | 2 +- pkg/scalers/liiklus/LiiklusService_grpc.pb.go | 2 +- pkg/scalers/scaler.go | 66 ++++ pkg/scalers/scaler_test.go | 243 +++++++++++++ pkg/scaling/resolver/scale_resolvers.go | 90 +++-- pkg/scaling/resolver/scale_resolvers_test.go | 51 ++- pkg/scaling/scale_handler_test.go | 122 +++++++ pkg/scaling/scalers_builder.go | 14 + tests/helper/helper.go | 16 + .../pause_scaledjob/pause_scaledjob_test.go | 16 + .../pause_scaledobject_explicitly_test.go | 49 +-- tests/run-all.go | 17 + .../aws_cloudwatch_pod_identity_test.go | 2 +- .../aws_cloudwatch_pod_identity_eks_test.go | 225 ++++++++++++ .../aws_dynamodb_pod_identity_test.go | 2 +- .../aws_dynamodb_pod_identity_eks_test.go | 277 +++++++++++++++ .../aws_dynamodb_streams_pod_identity_test.go | 2 +- ..._dynamodb_streams_pod_identity_eks_test.go | 294 ++++++++++++++++ .../aws_kinesis_stream_pod_identity_test.go | 2 +- ...ws_kinesis_stream_pod_identity_eks_test.go | 239 +++++++++++++ .../aws_sqs_queue_pod_identity_test.go | 2 +- .../aws_sqs_queue_pod_identity_eks_test.go | 219 ++++++++++++ .../azure_managed_prometheus/helper/helper.go | 4 +- tests/scalers/loki/loki_test.go | 4 +- tests/scalers/prometheus/prometheus_test.go | 4 +- .../aws_identity_assume_role_test.go | 327 ++++++++++++++++++ 
.../hashicorp_vault/hashicorp_vault_test.go | 202 +++++------ .../opentelemetry_metrics_test.go | 202 +++++++++-- .../prometheus_metrics_test.go | 202 +++++++++-- 72 files changed, 3977 insertions(+), 469 deletions(-) create mode 100644 controllers/keda/scaledjob_controller_test.go create mode 100644 pkg/scalers/aws/aws_authorization.go create mode 100644 pkg/scalers/aws/aws_common.go create mode 100644 pkg/scalers/aws/aws_config_cache.go create mode 100644 pkg/scalers/aws/aws_config_cache_test.go delete mode 100644 pkg/scalers/aws_common.go create mode 100644 tests/scalers/aws/aws_cloudwatch_pod_identity_eks/aws_cloudwatch_pod_identity_eks_test.go create mode 100644 tests/scalers/aws/aws_dynamodb_pod_identity_eks/aws_dynamodb_pod_identity_eks_test.go create mode 100644 tests/scalers/aws/aws_dynamodb_streams_pod_identity_eks/aws_dynamodb_streams_pod_identity_eks_test.go create mode 100644 tests/scalers/aws/aws_kinesis_stream_pod_identity_eks/aws_kinesis_stream_pod_identity_eks_test.go create mode 100644 tests/scalers/aws/aws_sqs_queue_pod_identity_eks/aws_sqs_queue_pod_identity_eks_test.go create mode 100644 tests/secret-providers/aws_identity_assume_role/aws_identity_assume_role_test.go diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml index 2486ef468a5..f169b4d66a3 100644 --- a/.github/workflows/pr-e2e.yml +++ b/.github/workflows/pr-e2e.yml @@ -22,7 +22,7 @@ jobs: id: checkUserMember with: username: ${{ github.actor }} - team: 'keda-e2e-test-executors' + team: "keda-e2e-test-executors" GITHUB_TOKEN: ${{ secrets.GH_CHECKING_USER_AUTH }} - name: Update comment with the execution url @@ -221,5 +221,5 @@ jobs: uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4 with: name: e2e-test-logs - path: '${{ github.workspace }}/tests/**/*.log' + path: "${{ github.workspace }}/**/*.log" if-no-files-found: ignore diff --git a/.github/workflows/template-main-e2e-test.yml b/.github/workflows/template-main-e2e-test.yml index 78400244ab7..1b84ae767d4 100644 --- a/.github/workflows/template-main-e2e-test.yml +++ b/.github/workflows/template-main-e2e-test.yml @@ -51,5 +51,5 @@ jobs: if: ${{ always() }} with: name: e2e-test-logs - path: '${{ github.workspace }}/tests/**/*.log' + path: "${{ github.workspace }}/**/*.log" if-no-files-found: ignore diff --git a/.github/workflows/template-smoke-tests.yml b/.github/workflows/template-smoke-tests.yml index 75a8959935b..a01b234e7ac 100644 --- a/.github/workflows/template-smoke-tests.yml +++ b/.github/workflows/template-smoke-tests.yml @@ -48,5 +48,5 @@ jobs: if: ${{ always() }} with: name: smoke-test-logs ${{ inputs.runs-on }}-${{ inputs.kubernetesVersion }} - path: "${{ github.workspace }}/tests/**/*.log" + path: "${{ github.workspace }}/**/*.log" if-no-files-found: ignore diff --git a/.gitignore b/.gitignore index 51f73f9d700..9040c59a8ea 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,6 @@ __debug_bin # GO Test result report.xml + +# KEDA Certs +certs/* diff --git a/CHANGELOG.md b/CHANGELOG.md index 023748c4b41..e9040399049 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,7 +51,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New -- **General**: TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +- **General**: Introduce new AWS Authentication ([#4134](https://github.com/kedacore/keda/issues/4134)) #### Experimental @@ -61,8 +61,13 @@ Here is an overview of all new **experimental** features: ### Improvements +- **General**: Add CloudEventSource metrics in 
Prometheus & OpenTelemetry ([#3531](https://github.com/kedacore/keda/issues/3531)) - **General**: Add parameter queryParameters to prometheus-scaler ([#4962](https://github.com/kedacore/keda/issues/4962)) - **General**: Add validations for replica counts when creating ScaledObjects ([#5288](https://github.com/kedacore/keda/issues/5288)) +- **General**: Bubble up AuthRef TriggerAuthentication errors as ScaledObject events ([#5190](https://github.com/kedacore/keda/issues/5190)) +- **General**: Enhance podIdentity Role Assumption in AWS by Direct Integration with OIDC/Federation ([#5178](https://github.com/kedacore/keda/issues/5178)) +- **General**: Fix issue where paused annotation being set to false still leads to scaled objects/jobs being paused ([#5215](https://github.com/kedacore/keda/issues/5215)) +- **General**: Implement Credentials Cache for AWS Roles to reduce AWS API calls ([#5297](https://github.com/kedacore/keda/issues/5297)) - **General**: Support TriggerAuthentication properties from ConfigMap ([#4830](https://github.com/kedacore/keda/issues/4830)) - **General**: Use client-side round-robin load balancing for grpc calls ([#5224](https://github.com/kedacore/keda/issues/5224)) - **GCP pubsub scaler**: Support distribution-valued metrics and metrics from topics ([#5070](https://github.com/kedacore/keda/issues/5070)) @@ -81,6 +86,7 @@ Here is an overview of all new **experimental** features: - **General**: Fix otelgrpc DoS vulnerability ([#5208](https://github.com/kedacore/keda/issues/5208)) - **General**: Prevented memory leak generated by not correctly cleaning http connections ([#5248](https://github.com/kedacore/keda/issues/5248)) - **General**: Prevented stuck status due to timeouts during scalers generation ([#5083](https://github.com/kedacore/keda/issues/5083)) +- **General**: ScaledObject Validating Webhook should support dry-run=server requests ([#5306](https://github.com/kedacore/keda/issues/5306)) - **AWS Scalers**: Ensure session tokens are included when instantiating AWS credentials ([#5156](https://github.com/kedacore/keda/issues/5156)) - **Azure Pipelines**: No more HTTP 400 errors produced by poolName with spaces ([#5107](https://github.com/kedacore/keda/issues/5107)) - **GCP pubsub scaler**: Added `project_id` to filter for metrics queries ([#5256](https://github.com/kedacore/keda/issues/5256)) @@ -103,7 +109,7 @@ New deprecation(s): - **General**: Clean up previously deprecated code in Azure Data Explorer Scaler about clientSecret for 2.13 release ([#5051](https://github.com/kedacore/keda/issues/5051)) ### Other - +- **General**: Create a common utility function to get parameter value from config ([#5037](https://github.com/kedacore/keda/issues/5037)) - **General**: Fix CVE-2023-45142 in Opentelemetry ([#5089](https://github.com/kedacore/keda/issues/5089)) - **General**: Fix logger in Opentelemetry collector ([#5094](https://github.com/kedacore/keda/issues/5094)) - **General**: Reduce amount of gauge creations for OpenTelemetry metrics ([#5101](https://github.com/kedacore/keda/issues/5101)) diff --git a/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/apis/eventing/v1alpha1/zz_generated.deepcopy.go index 395be2a4a89..4a59e8a0aaa 100644 --- a/apis/eventing/v1alpha1/zz_generated.deepcopy.go +++ b/apis/eventing/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2023 The KEDA Authors diff --git a/apis/keda/v1alpha1/scaledobject_types.go b/apis/keda/v1alpha1/scaledobject_types.go index 
a7357f5f804..0e8ddf614fe 100644 --- a/apis/keda/v1alpha1/scaledobject_types.go +++ b/apis/keda/v1alpha1/scaledobject_types.go @@ -19,6 +19,7 @@ package v1alpha1 import ( "fmt" "reflect" + "strconv" autoscalingv2 "k8s.io/api/autoscaling/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -193,6 +194,11 @@ func (so *ScaledObject) GenerateIdentifier() string { return GenerateIdentifier("ScaledObject", so.Namespace, so.Name) } +func (so *ScaledObject) HasPausedReplicaAnnotation() bool { + _, pausedReplicasAnnotationFound := so.GetAnnotations()[PausedReplicasAnnotation] + return pausedReplicasAnnotationFound +} + // HasPausedAnnotition returns whether this ScaledObject has PausedAnnotation or PausedReplicasAnnotation func (so *ScaledObject) HasPausedAnnotation() bool { _, pausedAnnotationFound := so.GetAnnotations()[PausedAnnotation] @@ -207,8 +213,16 @@ func (so *ScaledObject) NeedToBePausedByAnnotation() bool { return so.Status.PausedReplicaCount != nil } - _, pausedAnnotationFound := so.GetAnnotations()[PausedAnnotation] - return pausedAnnotationFound + pausedAnnotationValue, pausedAnnotationFound := so.GetAnnotations()[PausedAnnotation] + if !pausedAnnotationFound { + return false + } + shouldPause, err := strconv.ParseBool(pausedAnnotationValue) + if err != nil { + // if annotation value is not a boolean, we assume user wants to pause the ScaledObject + return true + } + return shouldPause } // IsUsingModifiers determines whether scalingModifiers are defined or not diff --git a/apis/keda/v1alpha1/scaledobject_webhook.go b/apis/keda/v1alpha1/scaledobject_webhook.go index 3c24719a5a3..b487024b013 100644 --- a/apis/keda/v1alpha1/scaledobject_webhook.go +++ b/apis/keda/v1alpha1/scaledobject_webhook.go @@ -53,22 +53,54 @@ func (so *ScaledObject) SetupWebhookWithManager(mgr ctrl.Manager) error { kc = mgr.GetClient() restMapper = mgr.GetRESTMapper() return ctrl.NewWebhookManagedBy(mgr). + WithValidator(&ScaledObjectCustomValidator{}). For(so). 
Complete() } // +kubebuilder:webhook:path=/validate-keda-sh-v1alpha1-scaledobject,mutating=false,failurePolicy=ignore,sideEffects=None,groups=keda.sh,resources=scaledobjects,verbs=create;update,versions=v1alpha1,name=vscaledobject.kb.io,admissionReviewVersions=v1 -var _ webhook.Validator = &ScaledObject{} +// ScaledObjectCustomValidator is a custom validator for ScaledObject objects +type ScaledObjectCustomValidator struct{} + +func (socv ScaledObjectCustomValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { + request, err := admission.RequestFromContext(ctx) + if err != nil { + return nil, err + } + so := obj.(*ScaledObject) + return so.ValidateCreate(request.DryRun) +} + +func (socv ScaledObjectCustomValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) { + request, err := admission.RequestFromContext(ctx) + if err != nil { + return nil, err + } + so := newObj.(*ScaledObject) + old := oldObj.(*ScaledObject) + return so.ValidateUpdate(old, request.DryRun) +} + +func (socv ScaledObjectCustomValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) { + request, err := admission.RequestFromContext(ctx) + if err != nil { + return nil, err + } + so := obj.(*ScaledObject) + return so.ValidateDelete(request.DryRun) +} + +var _ webhook.CustomValidator = &ScaledObjectCustomValidator{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (so *ScaledObject) ValidateCreate() (admission.Warnings, error) { +func (so *ScaledObject) ValidateCreate(dryRun *bool) (admission.Warnings, error) { val, _ := json.MarshalIndent(so, "", " ") scaledobjectlog.V(1).Info(fmt.Sprintf("validating scaledobject creation for %s", string(val))) - return validateWorkload(so, "create") + return validateWorkload(so, "create", *dryRun) } -func (so *ScaledObject) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { +func (so *ScaledObject) ValidateUpdate(old runtime.Object, dryRun *bool) (admission.Warnings, error) { val, _ := json.MarshalIndent(so, "", " ") scaledobjectlog.V(1).Info(fmt.Sprintf("validating scaledobject update for %s", string(val))) @@ -77,10 +109,10 @@ func (so *ScaledObject) ValidateUpdate(old runtime.Object) (admission.Warnings, return nil, nil } - return validateWorkload(so, "update") + return validateWorkload(so, "update", *dryRun) } -func (so *ScaledObject) ValidateDelete() (admission.Warnings, error) { +func (so *ScaledObject) ValidateDelete(_ *bool) (admission.Warnings, error) { return nil, nil } @@ -95,10 +127,10 @@ func isRemovingFinalizer(so *ScaledObject, old runtime.Object) bool { return len(so.ObjectMeta.Finalizers) == 0 && len(oldSo.ObjectMeta.Finalizers) == 1 && soSpecString == oldSoSpecString } -func validateWorkload(so *ScaledObject, action string) (admission.Warnings, error) { +func validateWorkload(so *ScaledObject, action string, dryRun bool) (admission.Warnings, error) { metricscollector.RecordScaledObjectValidatingTotal(so.Namespace, action) - verifyFunctions := []func(*ScaledObject, string) error{ + verifyFunctions := []func(*ScaledObject, string, bool) error{ verifyCPUMemoryScalers, verifyTriggers, verifyScaledObjects, @@ -107,7 +139,7 @@ func validateWorkload(so *ScaledObject, action string) (admission.Warnings, erro } for i := range verifyFunctions { - err := verifyFunctions[i](so, action) + err := verifyFunctions[i](so, action, dryRun) if err != nil { return 
nil, err } @@ -117,7 +149,7 @@ func validateWorkload(so *ScaledObject, action string) (admission.Warnings, erro return nil, nil } -func verifyReplicaCount(incomingSo *ScaledObject, action string) error { +func verifyReplicaCount(incomingSo *ScaledObject, action string, _ bool) error { err := CheckReplicaCountBoundsAreValid(incomingSo) if err != nil { scaledobjectlog.WithValues("name", incomingSo.Name).Error(err, "validation error") @@ -126,7 +158,7 @@ func verifyReplicaCount(incomingSo *ScaledObject, action string) error { return nil } -func verifyTriggers(incomingSo *ScaledObject, action string) error { +func verifyTriggers(incomingSo *ScaledObject, action string, _ bool) error { err := ValidateTriggers(incomingSo.Spec.Triggers) if err != nil { scaledobjectlog.WithValues("name", incomingSo.Name).Error(err, "validation error") @@ -135,7 +167,7 @@ func verifyTriggers(incomingSo *ScaledObject, action string) error { return err } -func verifyHpas(incomingSo *ScaledObject, action string) error { +func verifyHpas(incomingSo *ScaledObject, action string, _ bool) error { hpaList := &autoscalingv2.HorizontalPodAutoscalerList{} opt := &client.ListOptions{ Namespace: incomingSo.Namespace, @@ -190,7 +222,7 @@ func verifyHpas(incomingSo *ScaledObject, action string) error { return nil } -func verifyScaledObjects(incomingSo *ScaledObject, action string) error { +func verifyScaledObjects(incomingSo *ScaledObject, action string, _ bool) error { soList := &ScaledObjectList{} opt := &client.ListOptions{ Namespace: incomingSo.Namespace, @@ -241,7 +273,11 @@ func verifyScaledObjects(incomingSo *ScaledObject, action string) error { return nil } -func verifyCPUMemoryScalers(incomingSo *ScaledObject, action string) error { +func verifyCPUMemoryScalers(incomingSo *ScaledObject, action string, dryRun bool) error { + if dryRun { + return nil + } + var podSpec *corev1.PodSpec for _, trigger := range incomingSo.Spec.Triggers { if trigger.Type == cpuString || trigger.Type == memoryString { diff --git a/apis/keda/v1alpha1/scaledobject_webhook_test.go b/apis/keda/v1alpha1/scaledobject_webhook_test.go index 37c0c3a46d8..8c9dfe00df5 100644 --- a/apis/keda/v1alpha1/scaledobject_webhook_test.go +++ b/apis/keda/v1alpha1/scaledobject_webhook_test.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" ) var _ = It("should validate the so creation when there isn't any hpa", func() { @@ -232,6 +233,34 @@ var _ = It("should validate the so creation with cpu and memory when deployment }).ShouldNot(HaveOccurred()) }) +var _ = It("shouldn't validate the creation with cpu and memory when deployment is missing", func() { + + namespaceName := "deployment-missing" + namespace := createNamespace(namespaceName) + so := createScaledObject(soName, namespaceName, workloadName, "apps/v1", "Deployment", true, map[string]string{}, "") + + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() error { + return k8sClient.Create(context.Background(), so) + }).Should(HaveOccurred()) +}) + +var _ = It("should validate the creation with cpu and memory when deployment is missing and dry-run is true", func() { + + namespaceName := "deployment-missing-dry-run" + namespace := createNamespace(namespaceName) + so := createScaledObject(soName, namespaceName, workloadName, "apps/v1", "Deployment", true, map[string]string{}, "") + + err := k8sClient.Create(context.Background(), 
namespace) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() error { + return k8sClient.Create(context.Background(), so, client.DryRunAll) + }).ShouldNot(HaveOccurred()) +}) + var _ = It("shouldn't validate the so creation with cpu and memory when deployment hasn't got memory request", func() { namespaceName := "deployment-no-memory-request" diff --git a/apis/keda/v1alpha1/triggerauthentication_types.go b/apis/keda/v1alpha1/triggerauthentication_types.go index 9483abedb6b..6973d0a957e 100644 --- a/apis/keda/v1alpha1/triggerauthentication_types.go +++ b/apis/keda/v1alpha1/triggerauthentication_types.go @@ -118,9 +118,9 @@ const ( PodIdentityProviderAzure PodIdentityProvider = "azure" PodIdentityProviderAzureWorkload PodIdentityProvider = "azure-workload" PodIdentityProviderGCP PodIdentityProvider = "gcp" - PodIdentityProviderSpiffe PodIdentityProvider = "spiffe" PodIdentityProviderAwsEKS PodIdentityProvider = "aws-eks" PodIdentityProviderAwsKiam PodIdentityProvider = "aws-kiam" + PodIdentityProviderAws PodIdentityProvider = "aws" ) // PodIdentityAnnotationEKS specifies aws role arn for aws-eks Identity Provider @@ -133,9 +133,17 @@ const ( // AuthPodIdentity allows users to select the platform native identity // mechanism type AuthPodIdentity struct { + // +kubebuilder:validation:Enum=azure;azure-workload;gcp;aws;aws-eks;aws-kiam Provider PodIdentityProvider `json:"provider"` // +optional IdentityID *string `json:"identityId"` + // +optional + // RoleArn sets the AWS RoleArn to be used. Mutually exclusive with IdentityOwner + RoleArn string `json:"roleArn"` + // +kubebuilder:validation:Enum=keda;workload + // +optional + // IdentityOwner configures which identity has to be used during auto discovery, keda or the scaled workload. Mutually exclusive with roleArn + IdentityOwner *string `json:"identityOwner"` } func (a *AuthPodIdentity) GetIdentityID() string { @@ -145,6 +153,13 @@ func (a *AuthPodIdentity) GetIdentityID() string { return *a.IdentityID } +func (a *AuthPodIdentity) IsWorkloadIdentityOwner() bool { + if a.IdentityOwner == nil { + return false + } + return *a.IdentityOwner == workloadString +} + // AuthConfigMapTargetRef is used to authenticate using a reference to a config map type AuthConfigMapTargetRef AuthTargetRef diff --git a/apis/keda/v1alpha1/triggerauthentication_webhook.go b/apis/keda/v1alpha1/triggerauthentication_webhook.go index 72b14e1b388..df77bbc1e18 100644 --- a/apis/keda/v1alpha1/triggerauthentication_webhook.go +++ b/apis/keda/v1alpha1/triggerauthentication_webhook.go @@ -28,6 +28,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) +const ( + kedaString = "keda" + workloadString = "workload" +) + var triggerauthenticationlog = logf.Log.WithName("triggerauthentication-validation-webhook") func (ta *TriggerAuthentication) SetupWebhookWithManager(mgr ctrl.Manager) error { @@ -113,6 +118,10 @@ func validateSpec(spec *TriggerAuthenticationSpec) (admission.Warnings, error) { if spec.PodIdentity.IdentityID != nil && *spec.PodIdentity.IdentityID == "" { return nil, fmt.Errorf("identityid of PodIdentity should not be empty. 
If it's set, identityId has to be different than \"\"") } + case PodIdentityProviderAws: + if spec.PodIdentity.RoleArn != "" && spec.PodIdentity.IsWorkloadIdentityOwner() { + return nil, fmt.Errorf("roleArn of PodIdentity can't be set if KEDA isn't identityOwner") + } default: return nil, nil } diff --git a/apis/keda/v1alpha1/triggerauthentication_webhook_test.go b/apis/keda/v1alpha1/triggerauthentication_webhook_test.go index b18585ff97b..44ea8ed762c 100644 --- a/apis/keda/v1alpha1/triggerauthentication_webhook_test.go +++ b/apis/keda/v1alpha1/triggerauthentication_webhook_test.go @@ -24,13 +24,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -var _ = It("validate triggerauthentication when IdentityID is nil", func() { +var _ = It("validate triggerauthentication when IdentityID is nil, roleArn is empty and identityOwner is nil", func() { namespaceName := "nilidentityid" namespace := createNamespace(namespaceName) err := k8sClient.Create(context.Background(), namespace) Expect(err).ToNot(HaveOccurred()) - spec := createTriggerAuthenticationSpecWithPodIdentity(nil) + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", nil, nil) ta := createTriggerAuthentication("nilidentityidta", namespaceName, "TriggerAuthentication", spec) Eventually(func() error { return k8sClient.Create(context.Background(), ta) @@ -44,7 +44,7 @@ var _ = It("validate triggerauthentication when IdentityID is empty", func() { Expect(err).ToNot(HaveOccurred()) identityID := "" - spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID) + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", &identityID, nil) ta := createTriggerAuthentication("emptyidentityidta", namespaceName, "TriggerAuthentication", spec) Eventually(func() error { return k8sClient.Create(context.Background(), ta) @@ -58,7 +58,76 @@ var _ = It("validate triggerauthentication when IdentityID is not empty", func() Expect(err).ToNot(HaveOccurred()) identityID := "12345" - spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID) + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", &identityID, nil) + ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec) + Eventually(func() error { + return k8sClient.Create(context.Background(), ta) + }).ShouldNot(HaveOccurred()) +}) + +var _ = It("validate triggerauthentication when RoleArn is not empty and IdentityOwner is nil", func() { + namespaceName := "rolearn" + namespace := createNamespace(namespaceName) + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, nil) + ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec) + Eventually(func() error { + return k8sClient.Create(context.Background(), ta) + }).ShouldNot(HaveOccurred()) +}) + +var _ = It("validate triggerauthentication when RoleArn is not empty and IdentityOwner is keda", func() { + namespaceName := "rolearnandkedaowner" + namespace := createNamespace(namespaceName) + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + identityOwner := kedaString + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, &identityOwner) + ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec) + Eventually(func() 
error { + return k8sClient.Create(context.Background(), ta) + }).ShouldNot(HaveOccurred()) +}) + +var _ = It("validate triggerauthentication when RoleArn is not empty and IdentityOwner is workload", func() { + namespaceName := "rolearnandworkloadowner" + namespace := createNamespace(namespaceName) + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + identityOwner := workloadString + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, &identityOwner) + ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec) + Eventually(func() error { + return k8sClient.Create(context.Background(), ta) + }).Should(HaveOccurred()) +}) + +var _ = It("validate triggerauthentication when RoleArn is empty and IdentityOwner is keda", func() { + namespaceName := "kedaowner" + namespace := createNamespace(namespaceName) + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + identityOwner := kedaString + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "", nil, &identityOwner) + ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec) + Eventually(func() error { + return k8sClient.Create(context.Background(), ta) + }).ShouldNot(HaveOccurred()) +}) + +var _ = It("validate triggerauthentication when RoleArn is empty and IdentityOwner is workload", func() { + namespaceName := "workloadowner" + namespace := createNamespace(namespaceName) + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + identityOwner := workloadString + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "", nil, &identityOwner) ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec) Eventually(func() error { return k8sClient.Create(context.Background(), ta) @@ -71,7 +140,7 @@ var _ = It("validate clustertriggerauthentication when IdentityID is nil", func( err := k8sClient.Create(context.Background(), namespace) Expect(err).ToNot(HaveOccurred()) - spec := createTriggerAuthenticationSpecWithPodIdentity(nil) + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", nil, nil) ta := createTriggerAuthentication("clusternilidentityidta", namespaceName, "ClusterTriggerAuthentication", spec) Eventually(func() error { return k8sClient.Create(context.Background(), ta) @@ -85,7 +154,7 @@ var _ = It("validate clustertriggerauthentication when IdentityID is empty", fun Expect(err).ToNot(HaveOccurred()) identityID := "" - spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID) + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", &identityID, nil) ta := createTriggerAuthentication("clusteremptyidentityidta", namespaceName, "ClusterTriggerAuthentication", spec) Eventually(func() error { return k8sClient.Create(context.Background(), ta) @@ -99,18 +168,89 @@ var _ = It("validate clustertriggerauthentication when IdentityID is not empty", Expect(err).ToNot(HaveOccurred()) identityID := "12345" - spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID) + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAzure, "", &identityID, nil) ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec) Eventually(func() error { return k8sClient.Create(context.Background(),
ta) }).ShouldNot(HaveOccurred()) }) -func createTriggerAuthenticationSpecWithPodIdentity(identityID *string) TriggerAuthenticationSpec { +var _ = It("validate clustertriggerauthentication when RoleArn is not empty and IdentityOwner is nil", func() { + namespaceName := "clusterrolearn" + namespace := createNamespace(namespaceName) + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, nil) + ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec) + Eventually(func() error { + return k8sClient.Create(context.Background(), ta) + }).ShouldNot(HaveOccurred()) +}) + +var _ = It("validate clustertriggerauthentication when RoleArn is not empty and IdentityOwner is keda", func() { + namespaceName := "clusterrolearnandkedaowner" + namespace := createNamespace(namespaceName) + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + identityOwner := kedaString + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, &identityOwner) + ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec) + Eventually(func() error { + return k8sClient.Create(context.Background(), ta) + }).ShouldNot(HaveOccurred()) +}) + +var _ = It("validate clustertriggerauthentication when RoleArn is not empty and IdentityOwner is workload", func() { + namespaceName := "clusterrolearnandworkloadowner" + namespace := createNamespace(namespaceName) + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + identityOwner := workloadString + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "Helo", nil, &identityOwner) + ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec) + Eventually(func() error { + return k8sClient.Create(context.Background(), ta) + }).Should(HaveOccurred()) +}) + +var _ = It("validate clustertriggerauthentication when RoleArn is empty and IdentityOwner is keda", func() { + namespaceName := "clusterandkedaowner" + namespace := createNamespace(namespaceName) + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + identityOwner := kedaString + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "", nil, &identityOwner) + ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec) + Eventually(func() error { + return k8sClient.Create(context.Background(), ta) + }).ShouldNot(HaveOccurred()) +}) + +var _ = It("validate clustertriggerauthentication when RoleArn is empty and IdentityOwner is workload", func() { + namespaceName := "clusterandworkloadowner" + namespace := createNamespace(namespaceName) + err := k8sClient.Create(context.Background(), namespace) + Expect(err).ToNot(HaveOccurred()) + + identityOwner := workloadString + spec := createTriggerAuthenticationSpecWithPodIdentity(PodIdentityProviderAws, "", nil, &identityOwner) + ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec) + Eventually(func() error { + return k8sClient.Create(context.Background(), ta) + }).ShouldNot(HaveOccurred()) +}) + +func createTriggerAuthenticationSpecWithPodIdentity(provider PodIdentityProvider, roleArn string, identityID,
identityOwner *string) TriggerAuthenticationSpec { return TriggerAuthenticationSpec{ PodIdentity: &AuthPodIdentity{ - Provider: PodIdentityProviderAzure, - IdentityID: identityID, + Provider: provider, + IdentityID: identityID, + RoleArn: roleArn, + IdentityOwner: identityOwner, }, } } diff --git a/apis/keda/v1alpha1/zz_generated.deepcopy.go b/apis/keda/v1alpha1/zz_generated.deepcopy.go index 9435a9c4829..c9162874ac8 100755 --- a/apis/keda/v1alpha1/zz_generated.deepcopy.go +++ b/apis/keda/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2023 The KEDA Authors @@ -86,6 +85,11 @@ func (in *AuthPodIdentity) DeepCopyInto(out *AuthPodIdentity) { *out = new(string) **out = **in } + if in.IdentityOwner != nil { + in, out := &in.IdentityOwner, &out.IdentityOwner + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthPodIdentity. @@ -668,6 +672,21 @@ func (in *ScaledObject) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaledObjectCustomValidator) DeepCopyInto(out *ScaledObjectCustomValidator) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledObjectCustomValidator. +func (in *ScaledObjectCustomValidator) DeepCopy() *ScaledObjectCustomValidator { + if in == nil { + return nil + } + out := new(ScaledObjectCustomValidator) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ScaledObjectList) DeepCopyInto(out *ScaledObjectList) { *out = *in diff --git a/config/crd/bases/keda.sh_clustertriggerauthentications.yaml b/config/crd/bases/keda.sh_clustertriggerauthentications.yaml index cc9cacc688f..1885fcd69f9 100644 --- a/config/crd/bases/keda.sh_clustertriggerauthentications.yaml +++ b/config/crd/bases/keda.sh_clustertriggerauthentications.yaml @@ -111,8 +111,27 @@ spec: properties: identityId: type: string + identityOwner: + description: IdentityOwner configures which identity has to + be used during auto discovery, keda or the scaled workload. + Mutually exclusive with roleArn + enum: + - keda + - workload + type: string provider: description: PodIdentityProvider contains the list of providers + enum: + - azure + - azure-workload + - gcp + - aws + - aws-eks + - aws-kiam + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. Mutually + exclusive with IdentityOwner type: string required: - provider @@ -243,8 +262,27 @@ spec: properties: identityId: type: string + identityOwner: + description: IdentityOwner configures which identity has to be + used during auto discovery, keda or the scaled workload. Mutually + exclusive with roleArn + enum: + - keda + - workload + type: string provider: description: PodIdentityProvider contains the list of providers + enum: + - azure + - azure-workload + - gcp + - aws + - aws-eks + - aws-kiam + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. 
Mutually + exclusive with IdentityOwner type: string required: - provider diff --git a/config/crd/bases/keda.sh_triggerauthentications.yaml b/config/crd/bases/keda.sh_triggerauthentications.yaml index 6589a44301b..c731c78d1c8 100644 --- a/config/crd/bases/keda.sh_triggerauthentications.yaml +++ b/config/crd/bases/keda.sh_triggerauthentications.yaml @@ -110,8 +110,27 @@ spec: properties: identityId: type: string + identityOwner: + description: IdentityOwner configures which identity has to + be used during auto discovery, keda or the scaled workload. + Mutually exclusive with roleArn + enum: + - keda + - workload + type: string provider: description: PodIdentityProvider contains the list of providers + enum: + - azure + - azure-workload + - gcp + - aws + - aws-eks + - aws-kiam + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. Mutually + exclusive with IdentityOwner type: string required: - provider @@ -242,8 +261,27 @@ spec: properties: identityId: type: string + identityOwner: + description: IdentityOwner configures which identity has to be + used during auto discovery, keda or the scaled workload. Mutually + exclusive with roleArn + enum: + - keda + - workload + type: string provider: description: PodIdentityProvider contains the list of providers + enum: + - azure + - azure-workload + - gcp + - aws + - aws-eks + - aws-kiam + type: string + roleArn: + description: RoleArn sets the AWS RoleArn to be used. Mutually + exclusive with IdentityOwner type: string required: - provider diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go index 52e0b6e2eb8..1adde787874 100755 --- a/controllers/keda/scaledjob_controller.go +++ b/controllers/keda/scaledjob_controller.go @@ -19,6 +19,7 @@ package keda import ( "context" "fmt" + "strconv" "sync" "time" @@ -216,9 +217,17 @@ func (r *ScaledJobReconciler) reconcileScaledJob(ctx context.Context, logger log // checkIfPaused checks the presence of "autoscaling.keda.sh/paused" annotation on the scaledJob and stop the scale loop. 
func (r *ScaledJobReconciler) checkIfPaused(ctx context.Context, logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob, conditions *kedav1alpha1.Conditions) (bool, error) { - _, pausedAnnotation := scaledJob.GetAnnotations()[kedav1alpha1.PausedAnnotation] + pausedAnnotationValue, pausedAnnotation := scaledJob.GetAnnotations()[kedav1alpha1.PausedAnnotation] pausedStatus := conditions.GetPausedCondition().Status == metav1.ConditionTrue + shouldPause := false if pausedAnnotation { + var err error + shouldPause, err = strconv.ParseBool(pausedAnnotationValue) + if err != nil { + shouldPause = true + } + } + if shouldPause { if !pausedStatus { logger.Info("ScaledJob is paused, stopping scaling loop.") msg := kedav1alpha1.ScaledJobConditionPausedMessage @@ -286,7 +295,6 @@ func (r *ScaledJobReconciler) deletePreviousVersionScaleJobs(ctx context.Context // requestScaleLoop request ScaleLoop handler for the respective ScaledJob func (r *ScaledJobReconciler) requestScaleLoop(ctx context.Context, logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob) error { logger.V(1).Info("Starting a new ScaleLoop") - key, err := cache.MetaNamespaceKeyFunc(scaledJob) if err != nil { logger.Error(err, "Error getting key for scaledJob") diff --git a/controllers/keda/scaledjob_controller_test.go b/controllers/keda/scaledjob_controller_test.go new file mode 100644 index 00000000000..eb60bbd3b2c --- /dev/null +++ b/controllers/keda/scaledjob_controller_test.go @@ -0,0 +1,181 @@ +package keda + +import ( + "context" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" +) + +var _ = Describe("ScaledJobController", func() { + + var ( + testLogger = zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)) + ) + + Describe("functional tests", func() { + It("scaledjob paused condition status changes to true on annotation", func() { + jobName := "toggled-to-paused-annotation-name" + sjName := "sj-" + jobName + + sj := &kedav1alpha1.ScaledJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: sjName, + Namespace: "default", + }, + Spec: kedav1alpha1.ScaledJobSpec{ + JobTargetRef: generateJobSpec(jobName), + Triggers: []kedav1alpha1.ScaleTriggers{ + { + Type: "cron", + Metadata: map[string]string{ + "timezone": "UTC", + "start": "0 * * * *", + "end": "1 * * * *", + "desiredReplicas": "1", + }, + }, + }, + }, + } + pollingInterval := int32(5) + sj.Spec.PollingInterval = &pollingInterval + err := k8sClient.Create(context.Background(), sj) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() metav1.ConditionStatus { + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: sjName, Namespace: "default"}, sj) + if err != nil { + return metav1.ConditionTrue + } + return sj.Status.Conditions.GetPausedCondition().Status + }, 5*time.Second).Should(Or(Equal(metav1.ConditionFalse), Equal(metav1.ConditionUnknown))) + + // set annotation + Eventually(func() error { + err = k8sClient.Get(context.Background(), types.NamespacedName{Name: sjName, Namespace: "default"}, sj) + Expect(err).ToNot(HaveOccurred()) + annotations := make(map[string]string) + annotations[kedav1alpha1.PausedAnnotation] = "true" + sj.SetAnnotations(annotations) + pollingInterval := int32(6) + sj.Spec.PollingInterval = &pollingInterval + return k8sClient.Update(context.Background(), sj) + 
}).WithTimeout(1 * time.Minute).WithPolling(10 * time.Second).ShouldNot(HaveOccurred()) + testLogger.Info("annotation is set") + + // validate annotation is set correctly + Eventually(func() bool { + err = k8sClient.Get(context.Background(), types.NamespacedName{Name: sjName, Namespace: "default"}, sj) + Expect(err).ToNot(HaveOccurred()) + _, hasAnnotation := sj.GetAnnotations()[kedav1alpha1.PausedAnnotation] + return hasAnnotation + }).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(BeTrue()) + + Eventually(func() metav1.ConditionStatus { + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: sjName, Namespace: "default"}, sj) + if err != nil { + return metav1.ConditionUnknown + } + return sj.Status.Conditions.GetPausedCondition().Status + }).WithTimeout(2 * time.Minute).WithPolling(10 * time.Second).Should(Equal(metav1.ConditionTrue)) + }) + It("scaledjob paused status stays false when annotation is set to false", func() { + jobName := "turn-off-paused-annotation-name" + sjName := "sj-" + jobName + // create the object first; the paused annotation is set to "false" afterwards + sj := &kedav1alpha1.ScaledJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: sjName, + Namespace: "default", + }, + Spec: kedav1alpha1.ScaledJobSpec{ + JobTargetRef: generateJobSpec(jobName), + Triggers: []kedav1alpha1.ScaleTriggers{ + { + Type: "cron", + Metadata: map[string]string{ + "timezone": "UTC", + "start": "0 * * * *", + "end": "1 * * * *", + "desiredReplicas": "1", + }, + }, + }, + }, + } + pollingInterval := int32(5) + sj.Spec.PollingInterval = &pollingInterval + err := k8sClient.Create(context.Background(), sj) + Expect(err).ToNot(HaveOccurred()) + falseAnnotationValue := "false" + // set annotation + Eventually(func() error { + err = k8sClient.Get(context.Background(), types.NamespacedName{Name: sjName, Namespace: "default"}, sj) + Expect(err).ToNot(HaveOccurred()) + annotations := make(map[string]string) + annotations[kedav1alpha1.PausedAnnotation] = falseAnnotationValue + sj.SetAnnotations(annotations) + pollingInterval := int32(6) + sj.Spec.PollingInterval = &pollingInterval + return k8sClient.Update(context.Background(), sj) + }).WithTimeout(1 * time.Minute).WithPolling(10 * time.Second).ShouldNot(HaveOccurred()) + testLogger.Info("annotation is set") + + // validate annotation is set correctly + Eventually(func() bool { + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: sjName, Namespace: "default"}, sj) + Expect(err).ToNot(HaveOccurred()) + value, hasPausedAnnotation := sj.GetAnnotations()[kedav1alpha1.PausedAnnotation] + if !hasPausedAnnotation { + return false + } + return value == falseAnnotationValue + }).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(BeTrue()) + + // TODO(nappelson) - update assertion to be ConditionFalse + // https://github.com/kedacore/keda/issues/5251 prevents Condition from updating appropriately + Eventually(func() metav1.ConditionStatus { + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: sjName, Namespace: "default"}, sj) + if err != nil { + return metav1.ConditionUnknown + } + return sj.Status.Conditions.GetPausedCondition().Status + }).WithTimeout(1 * time.Minute).WithPolling(10 * time.Second).Should(Equal(metav1.ConditionUnknown)) + }) + + }) +}) + +func generateJobSpec(name string) *batchv1.JobSpec { + return &batchv1.JobSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": name, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ +
"app": name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: name, + Image: name, + }, + }, + }, + }, + } +} diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go index 3b6ff37ff05..55d83a03b8f 100755 --- a/controllers/keda/scaledobject_controller.go +++ b/controllers/keda/scaledobject_controller.go @@ -216,10 +216,10 @@ func (r *ScaledObjectReconciler) Reconcile(ctx context.Context, req ctrl.Request // reconcileScaledObject implements reconciler logic for ScaledObject func (r *ScaledObjectReconciler) reconcileScaledObject(ctx context.Context, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, conditions *kedav1alpha1.Conditions) (string, error) { - // Check the presence of "autoscaling.keda.sh/paused-replicas" annotation on the scaledObject (since the presence of this annotation will pause + // Check the presence of "autoscaling.keda.sh/paused" annotation on the scaledObject (since the presence of this annotation will pause // autoscaling no matter what number of replicas is provided), and if so, stop the scale loop and delete the HPA on the scaled object. - pausedAnnotationFound := scaledObject.HasPausedAnnotation() - if pausedAnnotationFound { + needsToPause := scaledObject.NeedToBePausedByAnnotation() + if needsToPause { scaledToPausedCount := true if conditions.GetPausedCondition().Status == metav1.ConditionTrue { // If scaledobject is in paused condition but replica count is not equal to paused replica count, the following scaling logic needs to be trigger again. @@ -228,7 +228,7 @@ func (r *ScaledObjectReconciler) reconcileScaledObject(ctx context.Context, logg return kedav1alpha1.ScaledObjectConditionReadySuccessMessage, nil } } - if scaledObject.NeedToBePausedByAnnotation() && scaledToPausedCount { + if scaledToPausedCount { msg := kedav1alpha1.ScaledObjectConditionPausedMessage if err := r.stopScaleLoop(ctx, logger, scaledObject); err != nil { msg = "failed to stop the scale loop for paused ScaledObject" @@ -298,7 +298,7 @@ func (r *ScaledObjectReconciler) reconcileScaledObject(ctx context.Context, logg } logger.Info("Initializing Scaling logic according to ScaledObject Specification") } - if pausedAnnotationFound && conditions.GetPausedCondition().Status != metav1.ConditionTrue { + if scaledObject.HasPausedReplicaAnnotation() && conditions.GetPausedCondition().Status != metav1.ConditionTrue { return "ScaledObject paused replicas are being scaled", fmt.Errorf("ScaledObject paused replicas are being scaled") } return kedav1alpha1.ScaledObjectConditionReadySuccessMessage, nil diff --git a/controllers/keda/scaledobject_controller_test.go b/controllers/keda/scaledobject_controller_test.go index c346b59f724..fd6e3cfe98e 100644 --- a/controllers/keda/scaledobject_controller_test.go +++ b/controllers/keda/scaledobject_controller_test.go @@ -934,6 +934,124 @@ var _ = Describe("ScaledObjectController", func() { return so.Status.Conditions.GetPausedCondition().Status }).WithTimeout(2 * time.Minute).WithPolling(10 * time.Second).Should(Equal(metav1.ConditionTrue)) }) + It("scaledObject paused status switches to false when annotation is set to false", func() { + // Create the scaling target. + deploymentName := "toggled-to-paused-annotation-false-name" + soName := "so-" + deploymentName + err := k8sClient.Create(context.Background(), generateDeployment(deploymentName)) + Expect(err).ToNot(HaveOccurred()) + + // Create the ScaledObject targeting the deployment.
+ so := &kedav1alpha1.ScaledObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: soName, + Namespace: "default", + }, + Spec: kedav1alpha1.ScaledObjectSpec{ + ScaleTargetRef: &kedav1alpha1.ScaleTarget{ + Name: deploymentName, + }, + Advanced: &kedav1alpha1.AdvancedConfig{ + HorizontalPodAutoscalerConfig: &kedav1alpha1.HorizontalPodAutoscalerConfig{}, + }, + Triggers: []kedav1alpha1.ScaleTriggers{ + { + Type: "cron", + Metadata: map[string]string{ + "timezone": "UTC", + "start": "0 * * * *", + "end": "1 * * * *", + "desiredReplicas": "1", + }, + }, + }, + }, + } + pollingInterval := int32(5) + so.Spec.PollingInterval = &pollingInterval + err = k8sClient.Create(context.Background(), so) + Expect(err).ToNot(HaveOccurred()) + + // And validate that hpa is created. + hpa := &autoscalingv2.HorizontalPodAutoscaler{} + Eventually(func() error { + return k8sClient.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("keda-hpa-%s", soName), Namespace: "default"}, hpa) + }).ShouldNot(HaveOccurred()) + + // wait for so's Ready condition to become True + Eventually(func() metav1.ConditionStatus { + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) + if err != nil { + return metav1.ConditionUnknown + } + return so.Status.Conditions.GetReadyCondition().Status + }).Should(Equal(metav1.ConditionTrue)) + + Eventually(func() metav1.ConditionStatus { + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) + if err != nil { + return metav1.ConditionTrue + } + return so.Status.Conditions.GetPausedCondition().Status + }, 5*time.Second).Should(Or(Equal(metav1.ConditionFalse), Equal(metav1.ConditionUnknown))) + + // set annotation to true at first + Eventually(func() error { + err = k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) + Expect(err).ToNot(HaveOccurred()) + annotations := make(map[string]string) + annotations[kedav1alpha1.PausedAnnotation] = "true" + so.SetAnnotations(annotations) + pollingInterval := int32(6) + so.Spec.PollingInterval = &pollingInterval + return k8sClient.Update(context.Background(), so) + }).WithTimeout(1 * time.Minute).WithPolling(10 * time.Second).ShouldNot(HaveOccurred()) + testLogger.Info("annotation is set") + + // validate annotation is set correctly + Eventually(func() bool { + err = k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) + Expect(err).ToNot(HaveOccurred()) + return so.HasPausedAnnotation() + }).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(BeTrue()) + + Eventually(func() metav1.ConditionStatus { + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) + if err != nil { + return metav1.ConditionUnknown + } + return so.Status.Conditions.GetPausedCondition().Status + }).WithTimeout(2 * time.Minute).WithPolling(10 * time.Second).Should(Equal(metav1.ConditionTrue)) + + // set annotation to false and confirm that ScaledObject is no longer paused + Eventually(func() error { + err = k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) + Expect(err).ToNot(HaveOccurred()) + annotations := make(map[string]string) + annotations[kedav1alpha1.PausedAnnotation] = "false" + so.SetAnnotations(annotations) + pollingInterval := int32(6) + so.Spec.PollingInterval = &pollingInterval + return k8sClient.Update(context.Background(), so) + }).WithTimeout(1 *
time.Minute).WithPolling(10 * time.Second).ShouldNot(HaveOccurred()) + testLogger.Info("annotation is set") + + // validate annotation is set correctly + Eventually(func() bool { + err = k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) + Expect(err).ToNot(HaveOccurred()) + return so.HasPausedAnnotation() + }).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(BeTrue()) + + // ensure object is no longer paused + Eventually(func() metav1.ConditionStatus { + err := k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) + if err != nil { + return metav1.ConditionUnknown + } + return so.Status.Conditions.GetPausedCondition().Status + }).WithTimeout(2 * time.Minute).WithPolling(10 * time.Second).Should(Equal(metav1.ConditionFalse)) + }) // Fix issue 4253 It("deletes hpa when scaledobject has pause annotation", func() { diff --git a/controllers/keda/suite_test.go b/controllers/keda/suite_test.go index 799cd46cacc..55dba8db166 100644 --- a/controllers/keda/suite_test.go +++ b/controllers/keda/suite_test.go @@ -101,6 +101,13 @@ var _ = BeforeSuite(func() { }).SetupWithManager(k8sManager, controller.Options{}) Expect(err).ToNot(HaveOccurred()) + err = (&ScaledJobReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + Recorder: k8sManager.GetEventRecorderFor("keda-operator"), + }).SetupWithManager(k8sManager, controller.Options{}) + Expect(err).ToNot(HaveOccurred()) + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).ToNot(HaveOccurred()) Expect(k8sClient).ToNot(BeNil()) diff --git a/pkg/eventemitter/eventemitter.go b/pkg/eventemitter/eventemitter.go index bfdac8d6cbd..9c7f21693a5 100644 --- a/pkg/eventemitter/eventemitter.go +++ b/pkg/eventemitter/eventemitter.go @@ -42,6 +42,7 @@ import ( eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" "github.com/kedacore/keda/v2/pkg/eventemitter/eventdata" + "github.com/kedacore/keda/v2/pkg/metricscollector" kedastatus "github.com/kedacore/keda/v2/pkg/status" ) @@ -228,8 +229,10 @@ func (e *EventEmitter) startEventLoop(ctx context.Context, cloudEventSource *eve e.log.V(1).Info("Consuming events from CloudEventSource.") e.emitEventByHandler(eventData) e.checkEventHandlers(ctx, cloudEventSource, cloudEventSourceMutex) + metricscollector.RecordCloudEventQueueStatus(cloudEventSource.Namespace, len(e.cloudEventProcessingChan)) case <-ctx.Done(): e.log.V(1).Info("CloudEventSource loop has stopped.") + metricscollector.RecordCloudEventQueueStatus(cloudEventSource.Namespace, len(e.cloudEventProcessingChan)) return } } @@ -295,6 +298,7 @@ func (e *EventEmitter) Emit(object runtime.Object, namesapce types.NamespacedNam } func (e *EventEmitter) enqueueEventData(eventData eventdata.EventData) { + metricscollector.RecordCloudEventQueueStatus(eventData.Namespace, len(e.cloudEventProcessingChan)) select { case e.cloudEventProcessingChan <- eventData: e.log.V(1).Info("Event enqueued successfully.") @@ -323,6 +327,8 @@ func (e *EventEmitter) emitEventByHandler(eventData eventdata.EventData) { eventData.HandlerKey = key if handler.GetActiveStatus() == metav1.ConditionTrue { go handler.EmitEvent(eventData, e.emitErrorHandle) + + metricscollector.RecordCloudEventEmitted(eventData.Namespace, getSourceNameFromKey(eventData.HandlerKey), getHandlerTypeFromKey(key)) } else { e.log.V(1).Info("EventHandler's status is not active. 
Please check if event endpoint works well", "CloudEventSource", eventData.ObjectName) } @@ -337,6 +343,8 @@ func (e *EventEmitter) emitEventByHandler(eventData eventdata.EventData) { } func (e *EventEmitter) emitErrorHandle(eventData eventdata.EventData, err error) { + metricscollector.RecordCloudEventEmittedError(eventData.Namespace, getSourceNameFromKey(eventData.HandlerKey), getHandlerTypeFromKey(eventData.HandlerKey)) + if eventData.RetryTimes >= maxRetryTimes { e.log.V(1).Info("Failed to emit Event multiple times. Will set handler failure status.", "handler", eventData.HandlerKey, "retry times", eventData.RetryTimes) handler, found := e.eventHandlersCache[eventData.HandlerKey] @@ -391,3 +399,19 @@ func (e *EventEmitter) updateCloudEventSourceStatus(ctx context.Context, cloudEv func newEventHandlerKey(kindNamespaceName string, handlerType string) string { //nolint:unparam return fmt.Sprintf("%s.%s", kindNamespaceName, handlerType) } + +func getHandlerTypeFromKey(handlerKey string) string { + keys := strings.Split(handlerKey, ".") + if len(keys) >= 4 { + return keys[3] + } + return "" +} + +func getSourceNameFromKey(handlerKey string) string { + keys := strings.Split(handlerKey, ".") + if len(keys) >= 4 { + return keys[2] + } + return "" +} diff --git a/pkg/metricscollector/metricscollectors.go b/pkg/metricscollector/metricscollectors.go index 91de1027c1c..8b70b24ce57 100644 --- a/pkg/metricscollector/metricscollectors.go +++ b/pkg/metricscollector/metricscollectors.go @@ -58,6 +58,15 @@ type MetricsCollector interface { IncrementCRDTotal(crdType, namespace string) DecrementCRDTotal(crdType, namespace string) + + // RecordCloudEventEmitted counts the number of cloudevents emitted to the user's sink + RecordCloudEventEmitted(namespace string, cloudeventsource string, eventsink string) + + // RecordCloudEventEmittedError counts the number of errors that occurred while emitting cloudevents + RecordCloudEventEmittedError(namespace string, cloudeventsource string, eventsink string) + + // RecordCloudEventQueueStatus records the number of cloudevents waiting to be emitted + RecordCloudEventQueueStatus(namespace string, value int) } func NewMetricsCollectors(enablePrometheusMetrics bool, enableOpenTelemetryMetrics bool) { @@ -144,3 +153,24 @@ func DecrementCRDTotal(crdType, namespace string) { element.DecrementCRDTotal(crdType, namespace) } } + +// RecordCloudEventEmitted counts the number of cloudevents emitted to the user's sink +func RecordCloudEventEmitted(namespace string, cloudeventsource string, eventsink string) { + for _, element := range collectors { + element.RecordCloudEventEmitted(namespace, cloudeventsource, eventsink) + } +} + +// RecordCloudEventEmittedError counts the number of errors that occurred while emitting cloudevents +func RecordCloudEventEmittedError(namespace string, cloudeventsource string, eventsink string) { + for _, element := range collectors { + element.RecordCloudEventEmittedError(namespace, cloudeventsource, eventsink) + } +} + +// RecordCloudEventQueueStatus records the number of cloudevents waiting to be emitted +func RecordCloudEventQueueStatus(namespace string, value int) { + for _, element := range collectors { + element.RecordCloudEventQueueStatus(namespace, value) + } +} diff --git a/pkg/metricscollector/opentelemetry.go b/pkg/metricscollector/opentelemetry.go index 64211e3f3e4..1bc9d0c3a5f 100644 --- a/pkg/metricscollector/opentelemetry.go +++ b/pkg/metricscollector/opentelemetry.go @@ -34,6 +34,9 @@ var ( otelInternalLoopLatencyVal
OtelMetricFloat64Val otelBuildInfoVal OtelMetricInt64Val + otCloudEventEmittedCounter api.Int64Counter + otCloudEventQueueStatusVal OtelMetricFloat64Val + otelScalerActiveVal OtelMetricFloat64Val ) @@ -140,6 +143,20 @@ func initMeters() { if err != nil { otLog.Error(err, msg) } + + otCloudEventEmittedCounter, err = meter.Int64Counter("keda.cloudeventsource.events.emitted.count", api.WithDescription("Measured the total number of emitted cloudevents. 'namespace': namespace of CloudEventSource 'cloudeventsource': name of CloudEventSource object. 'eventsink': destination of this emitted event 'state':indicated events emitted successfully or not")) + if err != nil { + otLog.Error(err, msg) + } + + _, err = meter.Float64ObservableGauge( + "keda.cloudeventsource.events.queued", + api.WithDescription("Indicates how many events are still queue"), + api.WithFloat64Callback(CloudeventQueueStatusCallback), + ) + if err != nil { + otLog.Error(err, msg) + } } func BuildInfoCallback(_ context.Context, obsrv api.Int64Observer) error { @@ -324,3 +341,43 @@ func getScalerMeasurementOption(namespace string, scaledObject string, scaler st attribute.Key("metric").String(metric), ) } + +// RecordCloudEventEmitted counts the number of cloudevent that emitted to user's sink +func (o *OtelMetrics) RecordCloudEventEmitted(namespace string, cloudeventsource string, eventsink string) { + opt := api.WithAttributes( + attribute.Key("namespace").String(namespace), + attribute.Key("cloudEventSource").String(cloudeventsource), + attribute.Key("eventsink").String(eventsink), + attribute.Key("state").String("emitted"), + ) + otCloudEventEmittedCounter.Add(context.Background(), 1, opt) +} + +// RecordCloudEventEmitted counts the number of errors occurred in trying emit cloudevent +func (o *OtelMetrics) RecordCloudEventEmittedError(namespace string, cloudeventsource string, eventsink string) { + opt := api.WithAttributes( + attribute.Key("namespace").String(namespace), + attribute.Key("cloudEventSource").String(cloudeventsource), + attribute.Key("eventsink").String(eventsink), + attribute.Key("state").String("failed"), + ) + otCloudEventEmittedCounter.Add(context.Background(), 1, opt) +} + +func CloudeventQueueStatusCallback(_ context.Context, obsrv api.Float64Observer) error { + if otCloudEventQueueStatusVal.measurementOption != nil { + obsrv.Observe(otCloudEventQueueStatusVal.val, otCloudEventQueueStatusVal.measurementOption) + } + otCloudEventQueueStatusVal = OtelMetricFloat64Val{} + return nil +} + +// RecordCloudEventSourceQueueStatus record the number of cloudevents that are waiting for emitting +func (o *OtelMetrics) RecordCloudEventQueueStatus(namespace string, value int) { + opt := api.WithAttributes( + attribute.Key("namespace").String(namespace), + ) + + otCloudEventQueueStatusVal.val = float64(value) + otCloudEventQueueStatusVal.measurementOption = opt +} diff --git a/pkg/metricscollector/prommetrics.go b/pkg/metricscollector/prommetrics.go index 5d318d378ae..9460bbe0505 100644 --- a/pkg/metricscollector/prommetrics.go +++ b/pkg/metricscollector/prommetrics.go @@ -130,6 +130,27 @@ var ( }, []string{"namespace", "type", "resource"}, ) + + // Total emitted cloudevents. + cloudeventEmitted = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: DefaultPromMetricsNamespace, + Subsystem: "cloudeventsource", + Name: "events_emitted_total", + Help: "Measured the total number of emitted cloudevents. 'namespace': namespace of CloudEventSource 'cloudeventsource': name of CloudEventSource object. 
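The OpenTelemetry collector above buffers the queue gauge in a package-level
variable that the observable-gauge callback drains, while the emitted/failed
counters are recorded synchronously. A minimal sketch of how the package-level
facade in metricscollectors.go fans these calls out to every enabled backend
(the namespace, source and sink names are invented for illustration):

	package main

	import "github.com/kedacore/keda/v2/pkg/metricscollector"

	func main() {
		// Register both backends; every call below reaches each of them.
		metricscollector.NewMetricsCollectors(true, true)

		// One successful delivery and one failed delivery; both land in the
		// same counter, distinguished by the "state" label/attribute.
		metricscollector.RecordCloudEventEmitted("default", "my-source", "my-sink")
		metricscollector.RecordCloudEventEmittedError("default", "my-source", "my-sink")

		// Gauge of events still waiting in the processing channel.
		metricscollector.RecordCloudEventQueueStatus("default", 3)
	}
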
diff --git a/pkg/metricscollector/prommetrics.go b/pkg/metricscollector/prommetrics.go
index 5d318d378ae..9460bbe0505 100644
--- a/pkg/metricscollector/prommetrics.go
+++ b/pkg/metricscollector/prommetrics.go
@@ -130,6 +130,27 @@ var (
 		},
 		[]string{"namespace", "type", "resource"},
 	)
+
+	// Total emitted cloudevents.
+	cloudeventEmitted = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Namespace: DefaultPromMetricsNamespace,
+			Subsystem: "cloudeventsource",
+			Name:      "events_emitted_total",
+			Help:      "Measures the total number of emitted cloudevents. 'namespace': namespace of the CloudEventSource, 'cloudeventsource': name of the CloudEventSource object, 'eventsink': destination of the emitted event, 'state': indicates whether the event was emitted successfully or not",
+		},
+		[]string{"namespace", "cloudeventsource", "eventsink", "state"},
+	)
+
+	cloudeventQueueStatus = prometheus.NewGaugeVec(
+		prometheus.GaugeOpts{
+			Namespace: DefaultPromMetricsNamespace,
+			Subsystem: "cloudeventsource",
+			Name:      "events_queued",
+			Help:      "Indicates how many events are still queued",
+		},
+		[]string{"namespace"},
+	)
 )
 
 type PromMetrics struct {
@@ -149,6 +170,9 @@ func NewPromMetrics() *PromMetrics {
 	metrics.Registry.MustRegister(crdTotalsGaugeVec)
 	metrics.Registry.MustRegister(buildInfo)
 
+	metrics.Registry.MustRegister(cloudeventEmitted)
+	metrics.Registry.MustRegister(cloudeventQueueStatus)
+
 	RecordBuildInfo()
 	return &PromMetrics{}
 }
@@ -260,3 +284,20 @@ func (p *PromMetrics) DecrementCRDTotal(crdType, namespace string) {
 
 	crdTotalsGaugeVec.WithLabelValues(crdType, namespace).Dec()
 }
+
+// RecordCloudEventEmitted counts the number of cloudevents emitted to the user's sink
+func (p *PromMetrics) RecordCloudEventEmitted(namespace string, cloudeventsource string, eventsink string) {
+	labels := prometheus.Labels{"namespace": namespace, "cloudeventsource": cloudeventsource, "eventsink": eventsink, "state": "emitted"}
+	cloudeventEmitted.With(labels).Inc()
+}
+
+// RecordCloudEventEmittedError counts the number of errors that occurred while emitting cloudevents
+func (p *PromMetrics) RecordCloudEventEmittedError(namespace string, cloudeventsource string, eventsink string) {
+	labels := prometheus.Labels{"namespace": namespace, "cloudeventsource": cloudeventsource, "eventsink": eventsink, "state": "failed"}
+	cloudeventEmitted.With(labels).Inc()
+}
+
+// RecordCloudEventQueueStatus records the number of cloudevents waiting to be emitted
+func (p *PromMetrics) RecordCloudEventQueueStatus(namespace string, value int) {
+	cloudeventQueueStatus.With(prometheus.Labels{"namespace": namespace}).Set(float64(value))
+}
diff --git a/pkg/metricsservice/api/metrics.pb.go b/pkg/metricsservice/api/metrics.pb.go
index 919cb333713..e2a0aceed40 100644
--- a/pkg/metricsservice/api/metrics.pb.go
+++ b/pkg/metricsservice/api/metrics.pb.go
@@ -16,7 +16,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.31.0
-// 	protoc        v4.23.2
+// 	protoc        v4.23.4
 // source: metrics.proto
 
 package api
diff --git a/pkg/metricsservice/api/metrics_grpc.pb.go b/pkg/metricsservice/api/metrics_grpc.pb.go
index 9eae639dc04..8836b080af0 100644
--- a/pkg/metricsservice/api/metrics_grpc.pb.go
+++ b/pkg/metricsservice/api/metrics_grpc.pb.go
@@ -16,7 +16,7 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.2 +// - protoc v4.23.4 // source: metrics.proto package api diff --git a/pkg/scalers/apache_kafka_scaler.go b/pkg/scalers/apache_kafka_scaler.go index 5d70d2ee660..3a3fec83bf1 100644 --- a/pkg/scalers/apache_kafka_scaler.go +++ b/pkg/scalers/apache_kafka_scaler.go @@ -35,6 +35,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" + awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -70,7 +71,7 @@ type apacheKafkaMetadata struct { // MSK awsRegion string awsEndpoint string - awsAuthorization awsAuthorizationMetadata + awsAuthorization awsutils.AuthorizationMetadata // TLS enableTLS bool @@ -196,7 +197,7 @@ func parseApacheKafkaAuthParams(config *ScalerConfig, meta *apacheKafkaMetadata) } else { return errors.New("no awsRegion given") } - auth, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv) + auth, err := awsutils.GetAwsAuthorization(config.ScalerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv) if err != nil { return err } @@ -394,7 +395,7 @@ func getApacheKafkaClient(ctx context.Context, metadata apacheKafkaMetadata, log case KafkaSASLTypeOAuthbearer: return nil, errors.New("SASL/OAUTHBEARER is not implemented yet") case KafkaSASLTypeMskIam: - cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) + cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) if err != nil { return nil, err } diff --git a/pkg/scalers/aws/aws_authorization.go b/pkg/scalers/aws/aws_authorization.go new file mode 100644 index 00000000000..6a54fa43889 --- /dev/null +++ b/pkg/scalers/aws/aws_authorization.go @@ -0,0 +1,17 @@ +package aws + +type AuthorizationMetadata struct { + AwsRoleArn string + + AwsAccessKeyID string + AwsSecretAccessKey string + AwsSessionToken string + + PodIdentityOwner bool + // Pod identity owner is confusing + // and it'll be removed when we get + // rid of the old aws podIdentities + UsingPodIdentity bool + + ScalerUniqueKey string +} diff --git a/pkg/scalers/aws/aws_common.go b/pkg/scalers/aws/aws_common.go new file mode 100644 index 00000000000..44a4be90676 --- /dev/null +++ b/pkg/scalers/aws/aws_common.go @@ -0,0 +1,113 @@ +package aws + +import ( + "context" + "errors" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/service/sts" + + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" +) + +// ErrAwsNoAccessKey is returned when awsAccessKeyID is missing. 
+var ErrAwsNoAccessKey = errors.New("awsAccessKeyID not found") + +type awsConfigMetadata struct { + awsRegion string + awsAuthorization AuthorizationMetadata +} + +var awsSharedCredentialsCache = newSharedConfigsCache() + +func GetAwsConfig(ctx context.Context, awsRegion string, awsAuthorization AuthorizationMetadata) (*aws.Config, error) { + metadata := &awsConfigMetadata{ + awsRegion: awsRegion, + awsAuthorization: awsAuthorization, + } + + if metadata.awsAuthorization.UsingPodIdentity || + (metadata.awsAuthorization.AwsAccessKeyID != "" && metadata.awsAuthorization.AwsSecretAccessKey != "") { + return awsSharedCredentialsCache.GetCredentials(ctx, metadata.awsRegion, metadata.awsAuthorization) + } + + // TODO, remove when aws-kiam and aws-eks are removed + configOptions := make([]func(*config.LoadOptions) error, 0) + configOptions = append(configOptions, config.WithRegion(metadata.awsRegion)) + cfg, err := config.LoadDefaultConfig(ctx, configOptions...) + if err != nil { + return nil, err + } + + if !metadata.awsAuthorization.PodIdentityOwner { + return &cfg, nil + } + + if metadata.awsAuthorization.AwsRoleArn != "" { + stsSvc := sts.NewFromConfig(cfg) + stsCredentialProvider := stscreds.NewAssumeRoleProvider(stsSvc, metadata.awsAuthorization.AwsRoleArn, func(options *stscreds.AssumeRoleOptions) {}) + cfg.Credentials = aws.NewCredentialsCache(stsCredentialProvider) + } + return &cfg, err + // END remove when aws-kiam and aws-eks are removed +} + +func GetAwsAuthorization(uniqueKey string, podIdentity kedav1alpha1.AuthPodIdentity, triggerMetadata, authParams, resolvedEnv map[string]string) (AuthorizationMetadata, error) { + meta := AuthorizationMetadata{ + ScalerUniqueKey: uniqueKey, + } + + if podIdentity.Provider == kedav1alpha1.PodIdentityProviderAws { + meta.UsingPodIdentity = true + if val, ok := authParams["awsRoleArn"]; ok && val != "" { + meta.AwsRoleArn = val + } + return meta, nil + } + // TODO, remove all the logic below and just keep the logic for + // parsing awsAccessKeyID, awsSecretAccessKey and awsSessionToken + // when aws-kiam and aws-eks are removed + if triggerMetadata["identityOwner"] == "operator" { + meta.PodIdentityOwner = false + } else if triggerMetadata["identityOwner"] == "" || triggerMetadata["identityOwner"] == "pod" { + meta.PodIdentityOwner = true + switch { + case authParams["awsRoleArn"] != "": + meta.AwsRoleArn = authParams["awsRoleArn"] + case (authParams["awsAccessKeyID"] != "" || authParams["awsAccessKeyId"] != "") && authParams["awsSecretAccessKey"] != "": + meta.AwsAccessKeyID = authParams["awsAccessKeyID"] + if meta.AwsAccessKeyID == "" { + meta.AwsAccessKeyID = authParams["awsAccessKeyId"] + } + meta.AwsSecretAccessKey = authParams["awsSecretAccessKey"] + meta.AwsSessionToken = authParams["awsSessionToken"] + default: + if triggerMetadata["awsAccessKeyID"] != "" { + meta.AwsAccessKeyID = triggerMetadata["awsAccessKeyID"] + } else if triggerMetadata["awsAccessKeyIDFromEnv"] != "" { + meta.AwsAccessKeyID = resolvedEnv[triggerMetadata["awsAccessKeyIDFromEnv"]] + } + + if len(meta.AwsAccessKeyID) == 0 { + return meta, ErrAwsNoAccessKey + } + + if triggerMetadata["awsSecretAccessKeyFromEnv"] != "" { + meta.AwsSecretAccessKey = resolvedEnv[triggerMetadata["awsSecretAccessKeyFromEnv"]] + } + + if len(meta.AwsSecretAccessKey) == 0 { + return meta, fmt.Errorf("awsSecretAccessKey not found") + } + } + } + + return meta, nil +} + +func ClearAwsConfig(awsAuthorization AuthorizationMetadata) { + awsSharedCredentialsCache.RemoveCachedEntry(awsAuthorization) +} 
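Taken together, aws_authorization.go and aws_common.go define the parsing
order for the new "aws" pod identity: a role ARN supplied through
TriggerAuthentication wins, and only when the provider is not "aws" do the
legacy identityOwner/static-key paths apply. A hedged sketch of the call as a
scaler would make it (the unique key, ARN and maps are illustrative, not from
this patch):

	package main

	import (
		"fmt"

		kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
		awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
	)

	func main() {
		auth, err := awsutils.GetAwsAuthorization(
			"default/my-scaledobject/trigger-0", // hypothetical unique key
			kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderAws},
			map[string]string{}, // triggerMetadata (ignored for the new provider)
			map[string]string{"awsRoleArn": "arn:aws:iam::123456789012:role/example"},
			map[string]string{}, // resolvedEnv
		)
		if err != nil {
			panic(err)
		}
		// UsingPodIdentity is true and only the role ARN is carried along.
		fmt.Println(auth.UsingPodIdentity, auth.AwsRoleArn)
	}
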
diff --git a/pkg/scalers/aws/aws_config_cache.go b/pkg/scalers/aws/aws_config_cache.go new file mode 100644 index 00000000000..a9a19899e66 --- /dev/null +++ b/pkg/scalers/aws/aws_config_cache.go @@ -0,0 +1,119 @@ +package aws + +import ( + "context" + "os" + "sync" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/go-logr/logr" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +type cacheEntry struct { + config *aws.Config + usages map[string]bool // Tracks the resources which have requested the cache +} +type sharedConfigCache struct { + sync.Mutex + items map[string]cacheEntry + logger logr.Logger +} + +func newSharedConfigsCache() sharedConfigCache { + return sharedConfigCache{items: map[string]cacheEntry{}, logger: logf.Log.WithName("aws_credentials_cache")} +} + +func (a *sharedConfigCache) getCacheKey(awsAuthorization AuthorizationMetadata) string { + if awsAuthorization.AwsAccessKeyID != "" && awsAuthorization.AwsSecretAccessKey != "" { + return awsAuthorization.AwsAccessKeyID + } + if awsAuthorization.AwsRoleArn != "" { + return awsAuthorization.AwsRoleArn + } + return "keda" +} + +func (a *sharedConfigCache) GetCredentials(ctx context.Context, awsRegion string, awsAuthorization AuthorizationMetadata) (*aws.Config, error) { + a.Lock() + defer a.Unlock() + key := a.getCacheKey(awsAuthorization) + if cachedEntry, exists := a.items[key]; exists { + cachedEntry.usages[awsAuthorization.ScalerUniqueKey] = true + a.items[key] = cachedEntry + return cachedEntry.config, nil + } + + configOptions := make([]func(*config.LoadOptions) error, 0) + configOptions = append(configOptions, config.WithRegion(awsRegion)) + cfg, err := config.LoadDefaultConfig(ctx, configOptions...) 
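+	// LoadDefaultConfig builds the base config from the environment (region,
+	// shared config files, web identity token, etc.); the branches below only
+	// decide which credentials provider is layered on top of it.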
+	if err != nil {
+		return nil, err
+	}
+
+	if awsAuthorization.UsingPodIdentity {
+		if awsAuthorization.AwsRoleArn != "" {
+			cfg.Credentials = a.retrievePodIdentityCredentials(cfg, awsAuthorization.AwsRoleArn)
+		}
+	} else {
+		cfg.Credentials = a.retrieveStaticCredentials(awsAuthorization)
+	}
+
+	newCacheEntry := cacheEntry{
+		config: &cfg,
+		usages: map[string]bool{
+			awsAuthorization.ScalerUniqueKey: true,
+		},
+	}
+	a.items[key] = newCacheEntry
+
+	return &cfg, nil
+}
+
+func (a *sharedConfigCache) retrievePodIdentityCredentials(cfg aws.Config, roleArn string) *aws.CredentialsCache {
+	stsSvc := sts.NewFromConfig(cfg)
+	webIdentityTokenFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
+
+	webIdentityCredentialProvider := stscreds.NewWebIdentityRoleProvider(stsSvc, roleArn, stscreds.IdentityTokenFile(webIdentityTokenFile), func(options *stscreds.WebIdentityRoleOptions) {
+		options.RoleSessionName = "KEDA"
+	})
+	var cachedProvider *aws.CredentialsCache
+
+	_, err := webIdentityCredentialProvider.Retrieve(context.Background())
+	if err != nil {
+		// Fallback to Assume Role
+		assumeRoleCredentialProvider := stscreds.NewAssumeRoleProvider(stsSvc, roleArn, func(options *stscreds.AssumeRoleOptions) {
+			options.RoleSessionName = "KEDA"
+		})
+		cachedProvider = aws.NewCredentialsCache(assumeRoleCredentialProvider)
+		a.logger.V(1).Info("using assume role to retrieve token", "roleArn", roleArn)
+	} else {
+		cachedProvider = aws.NewCredentialsCache(webIdentityCredentialProvider)
+		a.logger.V(1).Info("using assume web identity role to retrieve token", "roleArn", roleArn)
+	}
+	return cachedProvider
+}
+
+func (*sharedConfigCache) retrieveStaticCredentials(awsAuthorization AuthorizationMetadata) *aws.CredentialsCache {
+	staticCredentialsProvider := aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(awsAuthorization.AwsAccessKeyID, awsAuthorization.AwsSecretAccessKey, awsAuthorization.AwsSessionToken))
+	return staticCredentialsProvider
+}
+
+func (a *sharedConfigCache) RemoveCachedEntry(awsAuthorization AuthorizationMetadata) {
+	a.Lock()
+	defer a.Unlock()
+	key := a.getCacheKey(awsAuthorization)
+	if cachedEntry, exists := a.items[key]; exists {
+		// Delete the scalerUniqueKey from usages
+		delete(cachedEntry.usages, awsAuthorization.ScalerUniqueKey)
+
+		// If no more usages, delete the entire entry from the cache
+		if len(cachedEntry.usages) == 0 {
+			delete(a.items, key)
+		} else {
+			a.items[key] = cachedEntry
+		}
+	}
+}
diff --git a/pkg/scalers/aws/aws_config_cache_test.go b/pkg/scalers/aws/aws_config_cache_test.go
new file mode 100644
index 00000000000..e279f0c7cb8
--- /dev/null
+++ b/pkg/scalers/aws/aws_config_cache_test.go
@@ -0,0 +1,91 @@
+package aws
+
+import (
+	"context"
+	"testing"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/go-logr/logr"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetCredentialsReturnNewItemAndStoreItIfNotExist(t *testing.T) {
+	cache := newSharedConfigsCache()
+	cache.logger = logr.Discard()
+	config := awsConfigMetadata{
+		awsRegion: "test-region",
+		awsAuthorization: AuthorizationMetadata{
+			ScalerUniqueKey: "test-key",
+		},
+	}
+	_, err := cache.GetCredentials(context.Background(), config.awsRegion, config.awsAuthorization)
+	assert.NoError(t, err)
+	assert.Contains(t, cache.items, cache.getCacheKey(config.awsAuthorization))
+}
+
+func TestGetCredentialsReturnCachedItemIfExist(t *testing.T) {
+	cache := newSharedConfigsCache()
+	cache.logger = logr.Discard()
+	config := awsConfigMetadata{
+		awsRegion: "test1-region",
+		awsAuthorization:
AuthorizationMetadata{ + ScalerUniqueKey: "test1-key", + }, + } + cfg := aws.Config{} + cfg.AppID = "test1-app" + cache.items[cache.getCacheKey(config.awsAuthorization)] = cacheEntry{ + config: &cfg, + usages: map[string]bool{ + config.awsAuthorization.ScalerUniqueKey: true, + }, + } + configFromCache, err := cache.GetCredentials(context.Background(), config.awsRegion, config.awsAuthorization) + assert.NoError(t, err) + assert.Equal(t, &cfg, configFromCache) +} + +func TestRemoveCachedEntryRemovesCachedItemIfNotUsages(t *testing.T) { + cache := newSharedConfigsCache() + cache.logger = logr.Discard() + config := awsConfigMetadata{ + awsRegion: "test2-region", + awsAuthorization: AuthorizationMetadata{ + ScalerUniqueKey: "test2-key", + }, + } + cfg := aws.Config{} + cfg.AppID = "test2-app" + cacheKey := cache.getCacheKey(config.awsAuthorization) + cache.items[cacheKey] = cacheEntry{ + config: &cfg, + usages: map[string]bool{ + config.awsAuthorization.ScalerUniqueKey: true, + }, + } + cache.RemoveCachedEntry(config.awsAuthorization) + assert.NotContains(t, cache.items, cacheKey) +} + +func TestRemoveCachedEntryNotRemoveCachedItemIfUsages(t *testing.T) { + cache := newSharedConfigsCache() + cache.logger = logr.Discard() + config := awsConfigMetadata{ + awsRegion: "test3-region", + awsAuthorization: AuthorizationMetadata{ + ScalerUniqueKey: "test3-key", + }, + } + cfg := aws.Config{} + cfg.AppID = "test3-app" + cacheKey := cache.getCacheKey(config.awsAuthorization) + cache.items[cacheKey] = cacheEntry{ + config: &cfg, + usages: map[string]bool{ + config.awsAuthorization.ScalerUniqueKey: true, + "other-usage": true, + }, + } + cache.RemoveCachedEntry(config.awsAuthorization) + assert.Contains(t, cache.items, cacheKey) +} diff --git a/pkg/scalers/aws_cloudwatch_scaler.go b/pkg/scalers/aws_cloudwatch_scaler.go index deea194e4eb..187962da8a2 100644 --- a/pkg/scalers/aws_cloudwatch_scaler.go +++ b/pkg/scalers/aws_cloudwatch_scaler.go @@ -13,6 +13,8 @@ import ( "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" + + awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws" ) const ( @@ -49,7 +51,7 @@ type awsCloudwatchMetadata struct { awsRegion string awsEndpoint string - awsAuthorization awsAuthorizationMetadata + awsAuthorization awsutils.AuthorizationMetadata scalerIndex int } @@ -111,7 +113,7 @@ func getFloatMetadataValue(metadata map[string]string, key string, required bool } func createCloudwatchClient(ctx context.Context, metadata *awsCloudwatchMetadata) (*cloudwatch.Client, error) { - cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) + cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) if err != nil { return nil, err @@ -230,7 +232,7 @@ func parseAwsCloudwatchMetadata(config *ScalerConfig) (*awsCloudwatchMetadata, e meta.awsEndpoint = val } - awsAuthorization, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv) + awsAuthorization, err := awsutils.GetAwsAuthorization(config.ScalerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv) if err != nil { return nil, err } @@ -312,6 +314,7 @@ func (s *awsCloudwatchScaler) GetMetricSpecForScaling(context.Context) []v2.Metr } func (s *awsCloudwatchScaler) Close(context.Context) error { + awsutils.ClearAwsConfig(s.metadata.awsAuthorization) return nil } diff --git a/pkg/scalers/aws_cloudwatch_scaler_test.go b/pkg/scalers/aws_cloudwatch_scaler_test.go index 
5abdfd393fc..e27dbd97231 100644 --- a/pkg/scalers/aws_cloudwatch_scaler_test.go +++ b/pkg/scalers/aws_cloudwatch_scaler_test.go @@ -10,6 +10,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/go-logr/logr" "github.com/stretchr/testify/assert" + + awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws" ) const ( @@ -375,7 +377,7 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{ metricStatPeriod: 60, metricEndTimeOffset: 60, awsRegion: "us-west-2", - awsAuthorization: awsAuthorizationMetadata{podIdentityOwner: false}, + awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false}, scalerIndex: 0, }, { @@ -391,7 +393,7 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{ metricStatPeriod: 60, metricEndTimeOffset: 60, awsRegion: "us-west-2", - awsAuthorization: awsAuthorizationMetadata{podIdentityOwner: false}, + awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false}, scalerIndex: 0, }, { @@ -407,7 +409,7 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{ metricStatPeriod: 60, metricEndTimeOffset: 60, awsRegion: "us-west-2", - awsAuthorization: awsAuthorizationMetadata{podIdentityOwner: false}, + awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false}, scalerIndex: 0, }, { @@ -423,7 +425,7 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{ metricStatPeriod: 60, metricEndTimeOffset: 60, awsRegion: "us-west-2", - awsAuthorization: awsAuthorizationMetadata{podIdentityOwner: false}, + awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false}, scalerIndex: 0, }, { @@ -438,7 +440,7 @@ var awsCloudwatchGetMetricTestData = []awsCloudwatchMetadata{ metricStatPeriod: 60, metricEndTimeOffset: 60, awsRegion: "us-west-2", - awsAuthorization: awsAuthorizationMetadata{podIdentityOwner: false}, + awsAuthorization: awsutils.AuthorizationMetadata{PodIdentityOwner: false}, scalerIndex: 0, }, } diff --git a/pkg/scalers/aws_common.go b/pkg/scalers/aws_common.go deleted file mode 100644 index 9cad15ce3d3..00000000000 --- a/pkg/scalers/aws_common.go +++ /dev/null @@ -1,101 +0,0 @@ -package scalers - -import ( - "context" - "errors" - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/service/sts" -) - -// ErrAwsNoAccessKey is returned when awsAccessKeyID is missing. -var ErrAwsNoAccessKey = errors.New("awsAccessKeyID not found") - -type awsAuthorizationMetadata struct { - awsRoleArn string - - awsAccessKeyID string - awsSecretAccessKey string - awsSessionToken string - - podIdentityOwner bool -} - -type awsConfigMetadata struct { - awsRegion string - awsAuthorization awsAuthorizationMetadata -} - -func getAwsConfig(ctx context.Context, awsRegion string, awsAuthorization awsAuthorizationMetadata) (*aws.Config, error) { - metadata := &awsConfigMetadata{ - awsRegion: awsRegion, - awsAuthorization: awsAuthorization, - } - - configOptions := make([]func(*config.LoadOptions) error, 0) - configOptions = append(configOptions, config.WithRegion(metadata.awsRegion)) - cfg, err := config.LoadDefaultConfig(ctx, configOptions...) 
- if err != nil { - return nil, err - } - if !metadata.awsAuthorization.podIdentityOwner { - return &cfg, nil - } - if metadata.awsAuthorization.awsAccessKeyID != "" && metadata.awsAuthorization.awsSecretAccessKey != "" { - staticCredentialsProvider := aws.NewCredentialsCache(credentials.NewStaticCredentialsProvider(metadata.awsAuthorization.awsAccessKeyID, metadata.awsAuthorization.awsSecretAccessKey, metadata.awsAuthorization.awsSessionToken)) - cfg.Credentials = staticCredentialsProvider - } - - if metadata.awsAuthorization.awsRoleArn != "" { - stsSvc := sts.NewFromConfig(cfg) - stsCredentialProvider := stscreds.NewAssumeRoleProvider(stsSvc, metadata.awsAuthorization.awsRoleArn, func(options *stscreds.AssumeRoleOptions) {}) - cfg.Credentials = aws.NewCredentialsCache(stsCredentialProvider) - } - - return &cfg, err -} - -func getAwsAuthorization(authParams, metadata, resolvedEnv map[string]string) (awsAuthorizationMetadata, error) { - meta := awsAuthorizationMetadata{} - - if metadata["identityOwner"] == "operator" { - meta.podIdentityOwner = false - } else if metadata["identityOwner"] == "" || metadata["identityOwner"] == "pod" { - meta.podIdentityOwner = true - switch { - case authParams["awsRoleArn"] != "": - meta.awsRoleArn = authParams["awsRoleArn"] - case (authParams["awsAccessKeyID"] != "" || authParams["awsAccessKeyId"] != "") && authParams["awsSecretAccessKey"] != "": - meta.awsAccessKeyID = authParams["awsAccessKeyID"] - if meta.awsAccessKeyID == "" { - meta.awsAccessKeyID = authParams["awsAccessKeyId"] - } - meta.awsSecretAccessKey = authParams["awsSecretAccessKey"] - meta.awsSessionToken = authParams["awsSessionToken"] - default: - if metadata["awsAccessKeyID"] != "" { - meta.awsAccessKeyID = metadata["awsAccessKeyID"] - } else if metadata["awsAccessKeyIDFromEnv"] != "" { - meta.awsAccessKeyID = resolvedEnv[metadata["awsAccessKeyIDFromEnv"]] - } - - if len(meta.awsAccessKeyID) == 0 { - return meta, ErrAwsNoAccessKey - } - - if metadata["awsSecretAccessKeyFromEnv"] != "" { - meta.awsSecretAccessKey = resolvedEnv[metadata["awsSecretAccessKeyFromEnv"]] - } - - if len(meta.awsSecretAccessKey) == 0 { - return meta, fmt.Errorf("awsSecretAccessKey not found") - } - } - } - - return meta, nil -} diff --git a/pkg/scalers/aws_dynamodb_scaler.go b/pkg/scalers/aws_dynamodb_scaler.go index 05b173fd1ab..05466b527ab 100644 --- a/pkg/scalers/aws_dynamodb_scaler.go +++ b/pkg/scalers/aws_dynamodb_scaler.go @@ -15,6 +15,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" + awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -35,7 +36,7 @@ type awsDynamoDBMetadata struct { indexName string targetValue int64 activationTargetValue int64 - awsAuthorization awsAuthorizationMetadata + awsAuthorization awsutils.AuthorizationMetadata scalerIndex int metricName string } @@ -170,7 +171,7 @@ func parseAwsDynamoDBMetadata(config *ScalerConfig) (*awsDynamoDBMetadata, error meta.activationTargetValue = 0 } - auth, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv) + auth, err := awsutils.GetAwsAuthorization(config.ScalerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv) if err != nil { return nil, err } @@ -185,7 +186,7 @@ func parseAwsDynamoDBMetadata(config *ScalerConfig) (*awsDynamoDBMetadata, error } func createDynamoDBClient(ctx context.Context, metadata *awsDynamoDBMetadata) (*dynamodb.Client, error) { - cfg, err := 
getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) + cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) if err != nil { return nil, err } @@ -224,6 +225,7 @@ func (s *awsDynamoDBScaler) GetMetricSpecForScaling(context.Context) []v2.Metric } func (s *awsDynamoDBScaler) Close(context.Context) error { + awsutils.ClearAwsConfig(s.metadata.awsAuthorization) return nil } diff --git a/pkg/scalers/aws_dynamodb_scaler_test.go b/pkg/scalers/aws_dynamodb_scaler_test.go index 3ed599f5260..6cfb2bc5310 100644 --- a/pkg/scalers/aws_dynamodb_scaler_test.go +++ b/pkg/scalers/aws_dynamodb_scaler_test.go @@ -11,6 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" "github.com/go-logr/logr" "github.com/stretchr/testify/assert" + + awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws" ) const ( @@ -167,7 +169,7 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ "targetValue": "3", }, authParams: map[string]string{}, - expectedError: ErrAwsNoAccessKey, + expectedError: awsutils.ErrAwsNoAccessKey, }, { name: "authentication provided", @@ -190,10 +192,10 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ targetValue: 3, scalerIndex: 1, metricName: "s1-aws-dynamodb-test", - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: "none", - awsSecretAccessKey: "none", - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: "none", + AwsSecretAccessKey: "none", + PodIdentityOwner: true, }, }, }, @@ -220,10 +222,10 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ targetValue: 3, scalerIndex: 1, metricName: "s1-aws-dynamodb-test", - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: "none", - awsSecretAccessKey: "none", - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: "none", + AwsSecretAccessKey: "none", + PodIdentityOwner: true, }, }, }, @@ -250,10 +252,10 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ targetValue: 3, scalerIndex: 1, metricName: "s1-aws-dynamodb-test", - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: "none", - awsSecretAccessKey: "none", - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: "none", + AwsSecretAccessKey: "none", + PodIdentityOwner: true, }, }, }, @@ -280,10 +282,10 @@ var dynamoTestCases = []parseDynamoDBMetadataTestData{ targetValue: 3, scalerIndex: 1, metricName: "s1-aws-dynamodb-test", - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: "none", - awsSecretAccessKey: "none", - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: "none", + AwsSecretAccessKey: "none", + PodIdentityOwner: true, }, }, }, diff --git a/pkg/scalers/aws_dynamodb_streams_scaler.go b/pkg/scalers/aws_dynamodb_streams_scaler.go index 40c5b32a641..2d9f2158108 100644 --- a/pkg/scalers/aws_dynamodb_streams_scaler.go +++ b/pkg/scalers/aws_dynamodb_streams_scaler.go @@ -12,6 +12,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" + awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -34,7 +35,7 @@ type awsDynamoDBStreamsMetadata struct { tableName string awsRegion string awsEndpoint string - awsAuthorization awsAuthorizationMetadata + awsAuthorization awsutils.AuthorizationMetadata scalerIndex int } @@ -111,7 +112,7 @@ func parseAwsDynamoDBStreamsMetadata(config *ScalerConfig, logger logr.Logger) ( } } - auth, err 
:= getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv) + auth, err := awsutils.GetAwsAuthorization(config.ScalerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv) if err != nil { return nil, err } @@ -123,7 +124,7 @@ func parseAwsDynamoDBStreamsMetadata(config *ScalerConfig, logger logr.Logger) ( } func createClientsForDynamoDBStreamsScaler(ctx context.Context, metadata *awsDynamoDBStreamsMetadata) (*dynamodb.Client, *dynamodbstreams.Client, error) { - cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) + cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) if err != nil { return nil, nil, err } @@ -167,6 +168,7 @@ func getDynamoDBStreamsArn(ctx context.Context, db dynamodb.DescribeTableAPIClie } func (s *awsDynamoDBStreamsScaler) Close(_ context.Context) error { + awsutils.ClearAwsConfig(s.metadata.awsAuthorization) return nil } diff --git a/pkg/scalers/aws_dynamodb_streams_scaler_test.go b/pkg/scalers/aws_dynamodb_streams_scaler_test.go index 501cbf28a21..4f6cc5d438d 100644 --- a/pkg/scalers/aws_dynamodb_streams_scaler_test.go +++ b/pkg/scalers/aws_dynamodb_streams_scaler_test.go @@ -15,6 +15,8 @@ import ( "github.com/go-logr/logr" "github.com/stretchr/testify/assert" "k8s.io/metrics/pkg/apis/external_metrics" + + awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws" ) const ( @@ -136,10 +138,10 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{ activationTargetShardCount: 1, tableName: testAWSDynamoDBSmallTable, awsRegion: testAWSDynamoDBStreamsRegion, - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID, - awsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID, + AwsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey, + PodIdentityOwner: true, }, scalerIndex: 0, }, @@ -161,10 +163,10 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{ tableName: testAWSDynamoDBSmallTable, awsRegion: testAWSDynamoDBStreamsRegion, awsEndpoint: testAWSDynamoDBStreamsEndpoint, - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID, - awsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID, + AwsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey, + PodIdentityOwner: true, }, scalerIndex: 0, }, @@ -205,10 +207,10 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{ activationTargetShardCount: defaultActivationTargetDBStreamsShardCount, tableName: testAWSDynamoDBSmallTable, awsRegion: testAWSDynamoDBStreamsRegion, - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID, - awsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID, + AwsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey, + PodIdentityOwner: true, }, scalerIndex: 3, }, @@ -226,10 +228,10 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{ targetShardCount: defaultTargetDBStreamsShardCount, tableName: testAWSDynamoDBSmallTable, awsRegion: testAWSDynamoDBStreamsRegion, - 
awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID, - awsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID, + AwsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey, + PodIdentityOwner: true, }, scalerIndex: 4, }, @@ -278,11 +280,11 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{ targetShardCount: 2, tableName: testAWSDynamoDBSmallTable, awsRegion: testAWSDynamoDBStreamsRegion, - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID, - awsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey, - awsSessionToken: testAWSDynamoDBStreamsSessionToken, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: testAWSDynamoDBStreamsAccessKeyID, + AwsSecretAccessKey: testAWSDynamoDBStreamsSecretAccessKey, + AwsSessionToken: testAWSDynamoDBStreamsSessionToken, + PodIdentityOwner: true, }, scalerIndex: 5, }, @@ -330,9 +332,9 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{ targetShardCount: 2, tableName: testAWSDynamoDBSmallTable, awsRegion: testAWSDynamoDBStreamsRegion, - awsAuthorization: awsAuthorizationMetadata{ - awsRoleArn: testAWSDynamoDBStreamsRoleArn, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsRoleArn: testAWSDynamoDBStreamsRoleArn, + PodIdentityOwner: true, }, scalerIndex: 7, }, @@ -350,8 +352,8 @@ var testAwsDynamoDBStreamMetadata = []parseAwsDynamoDBStreamsMetadataTestData{ targetShardCount: 2, tableName: testAWSDynamoDBSmallTable, awsRegion: testAWSDynamoDBStreamsRegion, - awsAuthorization: awsAuthorizationMetadata{ - podIdentityOwner: false, + awsAuthorization: awsutils.AuthorizationMetadata{ + PodIdentityOwner: false, }, scalerIndex: 8, }, diff --git a/pkg/scalers/aws_kinesis_stream_scaler.go b/pkg/scalers/aws_kinesis_stream_scaler.go index 95ff02643a1..df4e0c66d1e 100644 --- a/pkg/scalers/aws_kinesis_stream_scaler.go +++ b/pkg/scalers/aws_kinesis_stream_scaler.go @@ -11,6 +11,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" + awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -44,7 +45,7 @@ type awsKinesisStreamMetadata struct { streamName string awsRegion string awsEndpoint string - awsAuthorization awsAuthorizationMetadata + awsAuthorization awsutils.AuthorizationMetadata scalerIndex int } @@ -116,7 +117,7 @@ func parseAwsKinesisStreamMetadata(config *ScalerConfig, logger logr.Logger) (*a meta.awsEndpoint = val } - auth, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv) + auth, err := awsutils.GetAwsAuthorization(config.ScalerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv) if err != nil { return nil, err } @@ -129,7 +130,7 @@ func parseAwsKinesisStreamMetadata(config *ScalerConfig, logger logr.Logger) (*a } func createKinesisClient(ctx context.Context, metadata *awsKinesisStreamMetadata) (*kinesis.Client, error) { - cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) + cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) if err != nil { return nil, err } @@ -141,6 +142,7 @@ func createKinesisClient(ctx context.Context, metadata *awsKinesisStreamMetadata } func (s 
*awsKinesisStreamScaler) Close(context.Context) error { + awsutils.ClearAwsConfig(s.metadata.awsAuthorization) return nil } diff --git a/pkg/scalers/aws_kinesis_stream_scaler_test.go b/pkg/scalers/aws_kinesis_stream_scaler_test.go index eef209df679..d759a6a5ece 100644 --- a/pkg/scalers/aws_kinesis_stream_scaler_test.go +++ b/pkg/scalers/aws_kinesis_stream_scaler_test.go @@ -11,6 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "github.com/go-logr/logr" "github.com/stretchr/testify/assert" + + awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws" ) const ( @@ -78,10 +80,10 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{ activationTargetShardCount: 1, streamName: testAWSKinesisStreamName, awsRegion: testAWSRegion, - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: testAWSKinesisAccessKeyID, - awsSecretAccessKey: testAWSKinesisSecretAccessKey, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: testAWSKinesisAccessKeyID, + AwsSecretAccessKey: testAWSKinesisSecretAccessKey, + PodIdentityOwner: true, }, scalerIndex: 0, }, @@ -103,10 +105,10 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{ streamName: testAWSKinesisStreamName, awsRegion: testAWSRegion, awsEndpoint: testAWSEndpoint, - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: testAWSKinesisAccessKeyID, - awsSecretAccessKey: testAWSKinesisSecretAccessKey, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: testAWSKinesisAccessKeyID, + AwsSecretAccessKey: testAWSKinesisSecretAccessKey, + PodIdentityOwner: true, }, scalerIndex: 0, }, @@ -148,10 +150,10 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{ activationTargetShardCount: activationTargetShardCountDefault, streamName: testAWSKinesisStreamName, awsRegion: testAWSRegion, - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: testAWSKinesisAccessKeyID, - awsSecretAccessKey: testAWSKinesisSecretAccessKey, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: testAWSKinesisAccessKeyID, + AwsSecretAccessKey: testAWSKinesisSecretAccessKey, + PodIdentityOwner: true, }, scalerIndex: 3, }, @@ -169,10 +171,10 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{ targetShardCount: 2, streamName: testAWSKinesisStreamName, awsRegion: testAWSRegion, - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: testAWSKinesisAccessKeyID, - awsSecretAccessKey: testAWSKinesisSecretAccessKey, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: testAWSKinesisAccessKeyID, + AwsSecretAccessKey: testAWSKinesisSecretAccessKey, + PodIdentityOwner: true, }, scalerIndex: 4, }, @@ -221,11 +223,11 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{ targetShardCount: 2, streamName: testAWSKinesisStreamName, awsRegion: testAWSRegion, - awsAuthorization: awsAuthorizationMetadata{ - awsAccessKeyID: testAWSKinesisAccessKeyID, - awsSecretAccessKey: testAWSKinesisSecretAccessKey, - awsSessionToken: testAWSKinesisSessionToken, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsAccessKeyID: testAWSKinesisAccessKeyID, + AwsSecretAccessKey: testAWSKinesisSecretAccessKey, + AwsSessionToken: testAWSKinesisSessionToken, + PodIdentityOwner: true, }, scalerIndex: 5, }, @@ -273,9 +275,9 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{ targetShardCount: 2, streamName: 
testAWSKinesisStreamName, awsRegion: testAWSRegion, - awsAuthorization: awsAuthorizationMetadata{ - awsRoleArn: testAWSKinesisRoleArn, - podIdentityOwner: true, + awsAuthorization: awsutils.AuthorizationMetadata{ + AwsRoleArn: testAWSKinesisRoleArn, + PodIdentityOwner: true, }, scalerIndex: 7, }, @@ -293,8 +295,8 @@ var testAWSKinesisMetadata = []parseAWSKinesisMetadataTestData{ targetShardCount: 2, streamName: testAWSKinesisStreamName, awsRegion: testAWSRegion, - awsAuthorization: awsAuthorizationMetadata{ - podIdentityOwner: false, + awsAuthorization: awsutils.AuthorizationMetadata{ + PodIdentityOwner: false, }, scalerIndex: 8, }, diff --git a/pkg/scalers/aws_sqs_queue_scaler.go b/pkg/scalers/aws_sqs_queue_scaler.go index 40e39217d10..2a603fe82f7 100644 --- a/pkg/scalers/aws_sqs_queue_scaler.go +++ b/pkg/scalers/aws_sqs_queue_scaler.go @@ -14,6 +14,7 @@ import ( v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" + awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -38,7 +39,7 @@ type awsSqsQueueMetadata struct { queueName string awsRegion string awsEndpoint string - awsAuthorization awsAuthorizationMetadata + awsAuthorization awsutils.AuthorizationMetadata scalerIndex int scaleOnInFlight bool scaleOnDelayed bool @@ -175,7 +176,7 @@ func parseAwsSqsQueueMetadata(config *ScalerConfig, logger logr.Logger) (*awsSqs meta.awsEndpoint = val } - auth, err := getAwsAuthorization(config.AuthParams, config.TriggerMetadata, config.ResolvedEnv) + auth, err := awsutils.GetAwsAuthorization(config.ScalerUniqueKey, config.PodIdentity, config.TriggerMetadata, config.AuthParams, config.ResolvedEnv) if err != nil { return nil, err } @@ -188,7 +189,7 @@ func parseAwsSqsQueueMetadata(config *ScalerConfig, logger logr.Logger) (*awsSqs } func createSqsClient(ctx context.Context, metadata *awsSqsQueueMetadata) (*sqs.Client, error) { - cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) + cfg, err := awsutils.GetAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) if err != nil { return nil, err } @@ -200,6 +201,7 @@ func createSqsClient(ctx context.Context, metadata *awsSqsQueueMetadata) (*sqs.C } func (s *awsSqsQueueScaler) Close(context.Context) error { + awsutils.ClearAwsConfig(s.metadata.awsAuthorization) return nil } diff --git a/pkg/scalers/externalscaler/externalscaler.pb.go b/pkg/scalers/externalscaler/externalscaler.pb.go index a8515ecc162..b67c09f0875 100644 --- a/pkg/scalers/externalscaler/externalscaler.pb.go +++ b/pkg/scalers/externalscaler/externalscaler.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.31.0 -// protoc v4.23.2 +// protoc v4.23.4 // source: externalscaler.proto package externalscaler diff --git a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go index 5489ae58ac4..e9944342368 100644 --- a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go +++ b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
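Every AWS scaler above now releases its cached credentials in Close() via
awsutils.ClearAwsConfig. A sketch of the resulting shared-entry lifecycle,
assuming two hypothetical triggers that resolve to the same role ARN (and
therefore the same cache key); the region, ARN and keys are illustrative:

	package main

	import (
		"context"

		awsutils "github.com/kedacore/keda/v2/pkg/scalers/aws"
	)

	func main() {
		ctx := context.Background()
		role := "arn:aws:iam::123456789012:role/shared" // illustrative ARN
		authA := awsutils.AuthorizationMetadata{UsingPodIdentity: true, AwsRoleArn: role, ScalerUniqueKey: "ns/so-a/0"}
		authB := awsutils.AuthorizationMetadata{UsingPodIdentity: true, AwsRoleArn: role, ScalerUniqueKey: "ns/so-b/0"}

		// Both triggers share one cached aws.Config keyed by the role ARN.
		_, _ = awsutils.GetAwsConfig(ctx, "eu-west-1", authA)
		_, _ = awsutils.GetAwsConfig(ctx, "eu-west-1", authB)

		// Closing the first trigger only drops its usage mark...
		awsutils.ClearAwsConfig(authA)
		// ...the entry is evicted once the last user is gone.
		awsutils.ClearAwsConfig(authB)
	}
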
// versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.2 +// - protoc v4.23.4 // source: externalscaler.proto package externalscaler diff --git a/pkg/scalers/liiklus/LiiklusService.pb.go b/pkg/scalers/liiklus/LiiklusService.pb.go index 54a8f7b33c8..30ce47cf816 100644 --- a/pkg/scalers/liiklus/LiiklusService.pb.go +++ b/pkg/scalers/liiklus/LiiklusService.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.31.0 -// protoc v4.23.2 +// protoc v4.23.4 // source: LiiklusService.proto package liiklus diff --git a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go index 51480c39dae..0e3731ee10d 100644 --- a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go +++ b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.23.2 +// - protoc v4.23.4 // source: LiiklusService.proto package liiklus diff --git a/pkg/scalers/scaler.go b/pkg/scalers/scaler.go index b2f8c067e3c..498d2195844 100644 --- a/pkg/scalers/scaler.go +++ b/pkg/scalers/scaler.go @@ -20,6 +20,8 @@ import ( "context" "errors" "fmt" + "reflect" + "strconv" "strings" "time" @@ -97,6 +99,9 @@ type ScalerConfig struct { // ScalerIndex ScalerIndex int + // ScalerUniqueKey for the scaler across KEDA. Useful to identify uniquely the scaler, eg: AWS credentials cache + ScalerUniqueKey string + // MetricType MetricType v2.MetricTargetType @@ -209,3 +214,64 @@ func GenerateMetricInMili(metricName string, value float64) external_metrics.Ext Timestamp: metav1.Now(), } } + +// getParameterFromConfigV2 returns the value of the parameter from the config +func getParameterFromConfigV2(config *ScalerConfig, parameter string, useMetadata bool, useAuthentication bool, useResolvedEnv bool, isOptional bool, defaultVal string, targetType reflect.Type) (interface{}, error) { + if val, ok := config.AuthParams[parameter]; useAuthentication && ok && val != "" { + returnedVal, err := convertStringToType(val, targetType) + if err != nil { + return defaultVal, err + } + return returnedVal, nil + } else if val, ok := config.TriggerMetadata[parameter]; ok && useMetadata && val != "" { + returnedVal, err := convertStringToType(val, targetType) + if err != nil { + return defaultVal, err + } + return returnedVal, nil + } else if val, ok := config.TriggerMetadata[fmt.Sprintf("%sFromEnv", parameter)]; ok && useResolvedEnv && val != "" { + returnedVal, err := convertStringToType(val, targetType) + if err != nil { + return defaultVal, err + } + return returnedVal, nil + } + + if isOptional { + return defaultVal, nil + } + return "", fmt.Errorf("key not found. 
Either set the correct key or set isOptional to true and set defaultVal") +} + +func convertStringToType(input string, targetType reflect.Type) (interface{}, error) { + switch targetType.Kind() { + case reflect.String: + return input, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + result, err := strconv.ParseInt(input, 10, 64) + if err != nil { + return nil, err + } + return reflect.ValueOf(result).Convert(targetType).Interface(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + result, err := strconv.ParseUint(input, 10, 64) + if err != nil { + return nil, err + } + return reflect.ValueOf(result).Convert(targetType).Interface(), nil + case reflect.Float32, reflect.Float64: + result, err := strconv.ParseFloat(input, 64) + if err != nil { + return nil, err + } + return reflect.ValueOf(result).Convert(targetType).Interface(), nil + case reflect.Bool: + result, err := strconv.ParseBool(input) + if err != nil { + return nil, err + } + return result, nil + default: + return nil, fmt.Errorf("unsupported type: %v", targetType) + } +} diff --git a/pkg/scalers/scaler_test.go b/pkg/scalers/scaler_test.go index 7a6f56f1618..e39c31ee6bc 100644 --- a/pkg/scalers/scaler_test.go +++ b/pkg/scalers/scaler_test.go @@ -1,6 +1,7 @@ package scalers import ( + "reflect" "testing" "github.com/stretchr/testify/assert" @@ -121,3 +122,245 @@ func TestRemoveIndexFromMetricName(t *testing.T) { } } } + +type getParameterFromConfigTestData struct { + name string + authParams map[string]string + metadata map[string]string + parameter string + useAuthentication bool + useMetadata bool + useResolvedEnv bool + isOptional bool + defaultVal string + targetType reflect.Type + expectedResult interface{} + isError bool + errorMessage string +} + +var getParameterFromConfigTestDataset = []getParameterFromConfigTestData{ + { + name: "test_authParam_only", + authParams: map[string]string{"key1": "value1"}, + parameter: "key1", + useAuthentication: true, + targetType: reflect.TypeOf(string("")), + expectedResult: "value1", + isError: false, + }, + { + name: "test_trigger_metadata_only", + metadata: map[string]string{"key1": "value1"}, + parameter: "key1", + useMetadata: true, + targetType: reflect.TypeOf(string("")), + expectedResult: "value1", + isError: false, + }, + { + name: "test_resolved_env_only", + metadata: map[string]string{"key1FromEnv": "value1"}, + parameter: "key1", + useResolvedEnv: true, + targetType: reflect.TypeOf(string("")), + expectedResult: "value1", + isError: false, + }, + { + name: "test_authParam_and_resolved_env_only", + authParams: map[string]string{"key1": "value1"}, + metadata: map[string]string{"key1FromEnv": "value2"}, + parameter: "key1", + useAuthentication: true, + useResolvedEnv: true, + targetType: reflect.TypeOf(string("")), + expectedResult: "value1", // Should get from Auth + isError: false, + }, + { + name: "test_authParam_and_trigger_metadata_only", + authParams: map[string]string{"key1": "value1"}, + metadata: map[string]string{"key1": "value2"}, + parameter: "key1", + useMetadata: true, + useAuthentication: true, + targetType: reflect.TypeOf(string("")), + expectedResult: "value1", // Should get from auth + isError: false, + }, + { + name: "test_trigger_metadata_and_resolved_env_only", + metadata: map[string]string{"key1": "value1", "key1FromEnv": "value2"}, + parameter: "key1", + useResolvedEnv: true, + useMetadata: true, + targetType: reflect.TypeOf(string("")), + expectedResult: "value1", // Should get from 
trigger metadata + isError: false, + }, + { + name: "test_isOptional_key_not_found", + metadata: map[string]string{"key1": "value1"}, + parameter: "key2", + useResolvedEnv: true, + useMetadata: true, + isOptional: true, + targetType: reflect.TypeOf(string("")), + expectedResult: "", // Should return empty string + isError: false, + }, + { + name: "test_default_value_key_not_found", + metadata: map[string]string{"key1": "value1"}, + parameter: "key2", + useResolvedEnv: true, + useMetadata: true, + isOptional: true, + defaultVal: "default", + targetType: reflect.TypeOf(string("")), + expectedResult: "default", + isError: false, + }, + { + name: "test_error", + metadata: map[string]string{"key1": "value1"}, + parameter: "key2", + useResolvedEnv: true, + useMetadata: true, + targetType: reflect.TypeOf(string("")), + expectedResult: "default", // Should return empty string + isError: true, + errorMessage: "key not found. Either set the correct key or set isOptional to true and set defaultVal", + }, + { + name: "test_authParam_bool", + authParams: map[string]string{"key1": "true"}, + parameter: "key1", + useAuthentication: true, + targetType: reflect.TypeOf(true), + expectedResult: true, + }, +} + +func TestGetParameterFromConfigV2(t *testing.T) { + for _, testData := range getParameterFromConfigTestDataset { + val, err := getParameterFromConfigV2( + &ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}, + testData.parameter, + testData.useMetadata, + testData.useAuthentication, + testData.useResolvedEnv, + testData.isOptional, + testData.defaultVal, + testData.targetType, + ) + if testData.isError { + assert.NotNilf(t, err, "test %s: expected error but got success, testData - %+v", testData.name, testData) + assert.Containsf(t, err.Error(), testData.errorMessage, "test %s: %v", testData.name, err.Error()) + } else { + assert.Nilf(t, err, "test %s:%v", testData.name, err) + assert.Equalf(t, testData.expectedResult, val, "test %s: expected %s but got %s", testData.name, testData.expectedResult, val) + } + } +} + +type convertStringToTypeTestData struct { + name string + input string + targetType reflect.Type + expectedOutput interface{} + isError bool + errorMessage string +} + +var convertStringToTypeDataset = []convertStringToTypeTestData{ + { + name: "test string", + input: "test", + targetType: reflect.TypeOf(string("")), + expectedOutput: "test", + }, + { + name: "test int", + input: "6", + targetType: reflect.TypeOf(int(6)), + expectedOutput: 6, + }, + { + name: "test int64 max", + input: "9223372036854775807", // int64 max + targetType: reflect.TypeOf(int64(6)), + expectedOutput: int64(9223372036854775807), + }, + { + name: "test int64 min", + input: "-9223372036854775808", // int64 min + targetType: reflect.TypeOf(int64(6)), + expectedOutput: int64(-9223372036854775808), + }, + { + name: "test uint64 max", + input: "18446744073709551615", // uint64 max + targetType: reflect.TypeOf(uint64(6)), + expectedOutput: uint64(18446744073709551615), + }, + { + name: "test float32", + input: "3.14", + targetType: reflect.TypeOf(float32(3.14)), + expectedOutput: float32(3.14), + }, + { + name: "test float64", + input: "0.123456789121212121212", + targetType: reflect.TypeOf(float64(0.123456789121212121212)), + expectedOutput: float64(0.123456789121212121212), + }, + { + name: "test bool", + input: "true", + targetType: reflect.TypeOf(true), + expectedOutput: true, + }, + { + name: "test bool 2", + input: "True", + targetType: reflect.TypeOf(true), + expectedOutput: true, + 
}, + { + name: "test bool 3", + input: "false", + targetType: reflect.TypeOf(false), + expectedOutput: false, + }, + { + name: "test bool 4", + input: "False", + targetType: reflect.TypeOf(false), + expectedOutput: false, + }, + { + name: "unsupported type", + input: "Unsupported Type", + targetType: reflect.TypeOf([]int{}), + expectedOutput: "error", + isError: true, + errorMessage: "unsupported type: []int", + }, +} + +func TestConvertStringToType(t *testing.T) { + for _, testData := range convertStringToTypeDataset { + val, err := convertStringToType(testData.input, testData.targetType) + + if testData.isError { + assert.NotNilf(t, err, "test %s: expected error but got success, testData - %+v", testData.name, testData) + assert.Contains(t, err.Error(), testData.errorMessage) + } else { + assert.Nil(t, err) + assert.Equalf(t, testData.expectedOutput, val, "test %s: expected %s but got %s", testData.name, testData.expectedOutput, val) + } + } +} diff --git a/pkg/scaling/resolver/scale_resolvers.go b/pkg/scaling/resolver/scale_resolvers.go index 99eb4e9fb1f..de64a635b6b 100644 --- a/pkg/scaling/resolver/scale_resolvers.go +++ b/pkg/scaling/resolver/scale_resolvers.go @@ -180,16 +180,41 @@ func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, log triggerAuthRef *kedav1alpha1.AuthenticationRef, podTemplateSpec *corev1.PodTemplateSpec, namespace string, secretsLister corev1listers.SecretLister) (map[string]string, kedav1alpha1.AuthPodIdentity, error) { if podTemplateSpec != nil { - authParams, podIdentity := resolveAuthRef(ctx, client, logger, triggerAuthRef, &podTemplateSpec.Spec, namespace, secretsLister) + authParams, podIdentity, err := resolveAuthRef(ctx, client, logger, triggerAuthRef, &podTemplateSpec.Spec, namespace, secretsLister) + + if err != nil { + return authParams, podIdentity, err + } switch podIdentity.Provider { + case kedav1alpha1.PodIdentityProviderAws: + if podIdentity.RoleArn != "" { + if podIdentity.IsWorkloadIdentityOwner() { + return nil, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, + fmt.Errorf("roleArn can't be set if KEDA isn't identity owner, current value: '%s'", *podIdentity.IdentityOwner) + } + authParams["awsRoleArn"] = podIdentity.RoleArn + } + if podIdentity.IsWorkloadIdentityOwner() { + serviceAccountName := defaultServiceAccount + if podTemplateSpec.Spec.ServiceAccountName != "" { + serviceAccountName = podTemplateSpec.Spec.ServiceAccountName + } + serviceAccount := &corev1.ServiceAccount{} + err := client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: namespace}, serviceAccount) + if err != nil { + return nil, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, + fmt.Errorf("error getting service account: '%s', error: %w", serviceAccountName, err) + } + authParams["awsRoleArn"] = serviceAccount.Annotations[kedav1alpha1.PodIdentityAnnotationEKS] + } case kedav1alpha1.PodIdentityProviderAwsEKS: serviceAccountName := defaultServiceAccount if podTemplateSpec.Spec.ServiceAccountName != "" { serviceAccountName = podTemplateSpec.Spec.ServiceAccountName } serviceAccount := &corev1.ServiceAccount{} - err := client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: namespace}, serviceAccount) + err = client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: namespace}, serviceAccount) if err != nil { return nil, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, fmt.Errorf("error getting service account: 
'%s', error: %w", serviceAccountName, err) @@ -198,10 +223,6 @@ func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, log case kedav1alpha1.PodIdentityProviderAwsKiam: authParams["awsRoleArn"] = podTemplateSpec.ObjectMeta.Annotations[kedav1alpha1.PodIdentityAnnotationKiam] case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload: - if podIdentity.Provider == kedav1alpha1.PodIdentityProviderAzure { - // FIXME: Delete this for v2.15 - logger.Info("WARNING: Azure AD Pod Identity has been archived (https://github.com/Azure/aad-pod-identity#-announcement) and will be removed from KEDA on v2.15") - } if podIdentity.IdentityID != nil && *podIdentity.IdentityID == "" { return nil, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, fmt.Errorf("IdentityID of PodIdentity should not be empty") } @@ -210,17 +231,18 @@ func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, log return authParams, podIdentity, nil } - authParams, _ := resolveAuthRef(ctx, client, logger, triggerAuthRef, nil, namespace, secretsLister) - return authParams, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, nil + authParams, _, err := resolveAuthRef(ctx, client, logger, triggerAuthRef, nil, namespace, secretsLister) + return authParams, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, err } // resolveAuthRef provides authentication parameters needed authenticate scaler with the environment. // based on authentication method defined in TriggerAuthentication, authParams and podIdentity is returned func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logger, triggerAuthRef *kedav1alpha1.AuthenticationRef, podSpec *corev1.PodSpec, - namespace string, secretsLister corev1listers.SecretLister) (map[string]string, kedav1alpha1.AuthPodIdentity) { + namespace string, secretsLister corev1listers.SecretLister) (map[string]string, kedav1alpha1.AuthPodIdentity, error) { result := make(map[string]string) var podIdentity kedav1alpha1.AuthPodIdentity + var err error if namespace != "" && triggerAuthRef != nil && triggerAuthRef.Name != "" { triggerAuthSpec, triggerNamespace, err := getTriggerAuthSpec(ctx, client, triggerAuthRef, namespace) @@ -257,20 +279,22 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge if triggerAuthSpec.HashiCorpVault != nil && len(triggerAuthSpec.HashiCorpVault.Secrets) > 0 { vault := NewHashicorpVaultHandler(triggerAuthSpec.HashiCorpVault) err := vault.Initialize(logger) + defer vault.Stop() if err != nil { - logger.Error(err, "error authenticate to Vault", "triggerAuthRef.Name", triggerAuthRef.Name) - } else { - secrets, err := vault.ResolveSecrets(triggerAuthSpec.HashiCorpVault.Secrets) - if err != nil { - logger.Error(err, "could not get secrets from vault", - "triggerAuthRef.Name", triggerAuthRef.Name, - ) - } else { - for _, e := range secrets { - result[e.Parameter] = e.Value - } - } - vault.Stop() + logger.Error(err, "error authenticating to Vault", "triggerAuthRef.Name", triggerAuthRef.Name) + return result, podIdentity, err + } + + secrets, err := vault.ResolveSecrets(triggerAuthSpec.HashiCorpVault.Secrets) + if err != nil { + logger.Error(err, "could not get secrets from vault", + "triggerAuthRef.Name", triggerAuthRef.Name, + ) + return result, podIdentity, err + } + + for _, e := range secrets { + result[e.Parameter] = e.Value } } if triggerAuthSpec.AzureKeyVault != nil && 
len(triggerAuthSpec.AzureKeyVault.Secrets) > 0 { @@ -278,22 +302,24 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge err := vaultHandler.Initialize(ctx, client, logger, triggerNamespace, secretsLister) if err != nil { logger.Error(err, "error authenticating to Azure Key Vault", "triggerAuthRef.Name", triggerAuthRef.Name) - } else { - for _, secret := range triggerAuthSpec.AzureKeyVault.Secrets { - res, err := vaultHandler.Read(ctx, secret.Name, secret.Version) - if err != nil { - logger.Error(err, "error trying to read secret from Azure Key Vault", "triggerAuthRef.Name", triggerAuthRef.Name, - "secret.Name", secret.Name, "secret.Version", secret.Version) - } else { - result[secret.Parameter] = res - } + return result, podIdentity, err + } + + for _, secret := range triggerAuthSpec.AzureKeyVault.Secrets { + res, err := vaultHandler.Read(ctx, secret.Name, secret.Version) + if err != nil { + logger.Error(err, "error trying to read secret from Azure Key Vault", "triggerAuthRef.Name", triggerAuthRef.Name, + "secret.Name", secret.Name, "secret.Version", secret.Version) + return result, podIdentity, err } + + result[secret.Parameter] = res } } } } - return result, podIdentity + return result, podIdentity, err } func getTriggerAuthSpec(ctx context.Context, client client.Client, triggerAuthRef *kedav1alpha1.AuthenticationRef, namespace string) (*kedav1alpha1.TriggerAuthenticationSpec, string, error) { diff --git a/pkg/scaling/resolver/scale_resolvers_test.go b/pkg/scaling/resolver/scale_resolvers_test.go index 32596824b46..de925409603 100644 --- a/pkg/scaling/resolver/scale_resolvers_test.go +++ b/pkg/scaling/resolver/scale_resolvers_test.go @@ -254,6 +254,8 @@ func TestResolveAuthRef(t *testing.T) { podSpec *corev1.PodSpec expected map[string]string expectedPodIdentity kedav1alpha1.AuthPodIdentity + isError bool + comment string }{ { name: "foo", @@ -323,6 +325,44 @@ func TestResolveAuthRef(t *testing.T) { expected: map[string]string{"host": secretData}, expectedPodIdentity: kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, }, + { + name: "triggerauth exists but hashicorp vault can't resolve", + existing: []runtime.Object{ + &kedav1alpha1.TriggerAuthentication{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: triggerAuthenticationName, + }, + Spec: kedav1alpha1.TriggerAuthenticationSpec{ + HashiCorpVault: &kedav1alpha1.HashiCorpVault{ + Address: "invalid-vault-address", + Authentication: "token", + Credential: &kedav1alpha1.Credential{ + Token: "my-token", + }, + Mount: "kubernetes", + Role: "my-role", + Secrets: []kedav1alpha1.VaultSecret{ + { + Key: "password", + Parameter: "password", + Path: "secret_v2/data/my-password-path", + }, + { + Key: "username", + Parameter: "username", + Path: "secret_v2/data/my-username-path", + }, + }, + }, + }, + }, + }, + isError: true, + comment: "\"invalid-vault-address/v1/auth/token/lookup-self\": unsupported protocol scheme \"\"", + soar: &kedav1alpha1.AuthenticationRef{Name: triggerAuthenticationName}, + expected: map[string]string{}, + }, { name: "triggerauth exists and config map", existing: []runtime.Object{ @@ -532,7 +572,7 @@ func TestResolveAuthRef(t *testing.T) { t.Run(test.name, func(t *testing.T) { ctx := context.Background() os.Setenv("KEDA_CLUSTER_OBJECT_NAMESPACE", clusterNamespace) // Inject test cluster namespace. 
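+ // resolveAuthRef now returns an error as a third value; the isError/comment fields on each test case drive the new assertions below.
+ // For reference, the new provider this PR introduces is configured roughly like this (sketch; the role ARN is a hypothetical example):
+ //   spec:
+ //     podIdentity:
+ //       provider: aws
+ //       roleArn: arn:aws:iam::123456789012:role/keda-operator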
- gotMap, gotPodIdentity := resolveAuthRef( + gotMap, gotPodIdentity, err := resolveAuthRef( ctx, fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(test.existing...).Build(), logf.Log.WithName("test"), @@ -540,6 +580,15 @@ test.podSpec, namespace, secretsLister) + + if err != nil && !test.isError { + t.Errorf("Expected success because %s, but got error: %s", test.comment, err) + } + + if test.isError && err == nil { + t.Errorf("Expected error because %s, but got success: %#v", test.comment, test) + } + if diff := cmp.Diff(gotMap, test.expected); diff != "" { t.Errorf("Returned authParams are different: %s", diff) } diff --git a/pkg/scaling/scale_handler_test.go b/pkg/scaling/scale_handler_test.go index eacf593a7cf..ebecd99486d 100644 --- a/pkg/scaling/scale_handler_test.go +++ b/pkg/scaling/scale_handler_test.go @@ -27,11 +27,13 @@ import ( "github.com/antonmedv/expr" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" v2 "k8s.io/api/autoscaling/v2" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/metrics/pkg/apis/external_metrics" @@ -290,6 +292,126 @@ func TestCheckScaledObjectScalersWithError(t *testing.T) { assert.Equal(t, true, isError) } +func TestCheckScaledObjectScalersWithTriggerAuthError(t *testing.T) { + ctrl := gomock.NewController(t) + mockClient := mock_client.NewMockClient(ctrl) + mockExecutor := mock_executor.NewMockScaleExecutor(ctrl) + recorder := record.NewFakeRecorder(1) + + scaler := mock_scalers.NewMockScaler(ctrl) + scaler.EXPECT().Close(gomock.Any()) + + factory := func() (scalers.Scaler, *scalers.ScalerConfig, error) { + scaler := mock_scalers.NewMockScaler(ctrl) + scaler.EXPECT().GetMetricsAndActivity(gomock.Any(), gomock.Any()).Return([]external_metrics.ExternalMetricValue{}, false, errors.New("some error")) + scaler.EXPECT().Close(gomock.Any()) + return scaler, &scalers.ScalerConfig{}, nil + } + + deployment := appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-test", + Namespace: "test", + }, + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "container", + }, + }, + }, + }, + }, + } + + scaledObject := kedav1alpha1.ScaledObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: "scaledobject-test", + Namespace: "test", + }, + Spec: kedav1alpha1.ScaledObjectSpec{ + ScaleTargetRef: &kedav1alpha1.ScaleTarget{ + Name: deployment.Name, + }, + Triggers: []kedav1alpha1.ScaleTriggers{ + { + Name: triggerName1, + Type: "fake_trig1", + AuthenticationRef: &kedav1alpha1.AuthenticationRef{ + Name: "triggerauth-test", + }, + }, + }, + }, + Status: kedav1alpha1.ScaledObjectStatus{ + ScaleTargetGVKR: &kedav1alpha1.GroupVersionKindResource{ + Group: "apps", + Kind: "Deployment", + }, + ExternalMetricNames: []string{metricName1, metricName2}, + }, + } + + triggerAuth := kedav1alpha1.TriggerAuthentication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "triggerauth-test", + Namespace: "test", + }, + Spec: kedav1alpha1.TriggerAuthenticationSpec{ + HashiCorpVault: &kedav1alpha1.HashiCorpVault{ + Address: "invalid-vault-address", + Authentication: "token", + Credential: &kedav1alpha1.Credential{ + Token: "my-token", + }, + Mount: "kubernetes", + Role: "my-role", + Secrets: []kedav1alpha1.VaultSecret{ + { + Parameter: "username", + Key: 
"username", + Path: "secret_v2/data/my-username-path", + }, + }, + }, + }, + } + + scalerCache := cache.ScalersCache{ + Scalers: []cache.ScalerBuilder{{ + Scaler: scaler, + Factory: factory, + }}, + Recorder: recorder, + } + + mockClient.EXPECT().Get(gomock.Any(), types.NamespacedName{Name: deployment.Name, Namespace: deployment.Namespace}, gomock.Any()).SetArg(2, deployment) + mockClient.EXPECT().Get(gomock.Any(), types.NamespacedName{Name: triggerAuth.Name, Namespace: triggerAuth.Namespace}, gomock.Any()).SetArg(2, triggerAuth) + + sh := scaleHandler{ + client: mockClient, + scaleLoopContexts: &sync.Map{}, + scaleExecutor: mockExecutor, + globalHTTPTimeout: time.Duration(1000), + recorder: recorder, + scalerCaches: map[string]*cache.ScalersCache{}, + scalerCachesLock: &sync.RWMutex{}, + scaledObjectsMetricCache: metricscache.NewMetricsCache(), + } + + isActive, isError, _, _ := sh.getScaledObjectState(context.TODO(), &scaledObject) + scalerCache.Close(context.Background()) + + assert.Equal(t, false, isActive) + assert.Equal(t, true, isError) + + failureEvent := <-recorder.Events + assert.Contains(t, failureEvent, "KEDAScalerFailed") + assert.Contains(t, failureEvent, "unsupported protocol scheme") +} + func TestCheckScaledObjectFindFirstActiveNotIgnoreOthers(t *testing.T) { ctrl := gomock.NewController(t) mockClient := mock_client.NewMockClient(ctrl) diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index 19af188a264..fa5c3736179 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -65,9 +65,23 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp ScalerIndex: triggerIndex, MetricType: trigger.MetricType, AsMetricSource: asMetricSource, + ScalerUniqueKey: trigger.Name + "-" + withTriggers.Name + "-" + withTriggers.Namespace + "-" + withTriggers.Kind, } authParams, podIdentity, err := resolver.ResolveAuthRefAndPodIdentity(ctx, h.client, logger, trigger.AuthenticationRef, podTemplateSpec, withTriggers.Namespace, h.secretsLister) + switch podIdentity.Provider { + case kedav1alpha1.PodIdentityProviderAzure: + // FIXME: Delete this for v2.15 + logger.Info("WARNING: Azure AD Pod Identity has been archived (https://github.com/Azure/aad-pod-identity#-announcement) and will be removed from KEDA on v2.15") + case kedav1alpha1.PodIdentityProviderAwsKiam: + // FIXME: Delete this for v2.15 + logger.Info("WARNING: AWS Kiam Identity has been abandoned (https://github.com/uswitch/kiam) and will be removed from KEDA on v2.15") + case kedav1alpha1.PodIdentityProviderAwsEKS: + // FIXME: Delete this for v3 + logger.Info("WARNING: AWS EKS Identity has been deprecated in favor of AWS Identity and will be removed from KEDA on v3") + default: + } + if err != nil { return nil, nil, err } diff --git a/tests/helper/helper.go b/tests/helper/helper.go index e0c8f734cc7..4cdbebdc88f 100644 --- a/tests/helper/helper.go +++ b/tests/helper/helper.go @@ -641,6 +641,22 @@ func KubectlDeleteMultipleWithTemplate(t *testing.T, data interface{}, templates } } +func KubectlCopyToPod(t *testing.T, content string, remotePath, pod, namespace string) { + tempFile, err := os.CreateTemp("", "copy-to-pod") + assert.NoErrorf(t, err, "cannot create temp file - %s", err) + defer os.Remove(tempFile.Name()) + + _, err = tempFile.WriteString(content) + assert.NoErrorf(t, err, "cannot write temp file - %s", err) + + commnand := fmt.Sprintf("kubectl cp %s %s:/%s -n %s", tempFile.Name(), pod, remotePath, namespace) + _, err = ExecuteCommand(commnand) + 
assert.NoErrorf(t, err, "cannot copy file - %s", err) + + err = tempFile.Close() + assert.NoErrorf(t, err, "cannot close temp file - %s", err) +} + func CreateKubernetesResources(t *testing.T, kc *kubernetes.Clientset, nsName string, data interface{}, templates []Template) { CreateNamespace(t, kc, nsName) KubectlApplyMultipleWithTemplate(t, data, templates) diff --git a/tests/internals/pause_scaledjob/pause_scaledjob_test.go b/tests/internals/pause_scaledjob/pause_scaledjob_test.go index 1cdacacc2ea..4a3f77ba3bc 100644 --- a/tests/internals/pause_scaledjob/pause_scaledjob_test.go +++ b/tests/internals/pause_scaledjob/pause_scaledjob_test.go @@ -165,6 +165,9 @@ func TestScaler(t *testing.T) { testPause(t, kc, listOptions) testUnpause(t, kc, data, listOptions) + testPause(t, kc, listOptions) + testUnpauseWithBool(t, kc, data, listOptions) + // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) } @@ -211,3 +214,16 @@ func testUnpause(t *testing.T, kc *kubernetes.Clientset, data templateData, list assert.True(t, WaitForJobByFilterCountUntilIteration(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1, listOptions), "job count should be %d after %d iterations", expectedTarget, iterationCountLatter) } + +func testUnpauseWithBool(t *testing.T, kc *kubernetes.Clientset, data templateData, listOptions metav1.ListOptions) { + t.Log("--- test setting Paused annotation to false ---") + + _, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledjob %s autoscaling.keda.sh/paused=false --namespace %s --overwrite=true", scaledJobName, testNamespace)) + assert.NoErrorf(t, err, "cannot execute command - %s", err) + + t.Log("job count increases from zero as job is no longer paused") + + expectedTarget := data.MetricThreshold + assert.True(t, WaitForJobByFilterCountUntilIteration(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1, listOptions), + "job count should be %d after %d iterations", expectedTarget, iterationCountLatter) +} diff --git a/tests/internals/pause_scaledobject_explicitly/pause_scaledobject_explicitly_test.go b/tests/internals/pause_scaledobject_explicitly/pause_scaledobject_explicitly_test.go index a1e7523c5f9..cb5462c718f 100644 --- a/tests/internals/pause_scaledobject_explicitly/pause_scaledobject_explicitly_test.go +++ b/tests/internals/pause_scaledobject_explicitly/pause_scaledobject_explicitly_test.go @@ -113,21 +113,24 @@ func TestScaler(t *testing.T) { kc := GetKubernetesClient(t) data, templates := getTemplateData() - CreateKubernetesResources(t, kc, testNamespace, data, templates) - - // scaling to paused replica count - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), - "replica count should be 0 after 1 minute") - - // test scaling - testPauseWhenScaleOut(t, kc) - testScaleOut(t, kc) - testPauseWhenScaleIn(t, kc) - testScaleIn(t, kc) - testBothPauseAnnotationActive(t, kc) - - // cleanup - DeleteKubernetesResources(t, testNamespace, data, templates) + unpausedMethods := [](func(assert.TestingT)){removeScaledObjectPausedAnnotation, setScaledObjectPausedAnnotationFalse} + + for _, unpauseMethod := range unpausedMethods { + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + // scaling to paused replica count + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), + "replica count should be 0 after 1 minute") + // test scaling + testPauseWhenScaleOut(t, kc) + testScaleOut(t, kc, unpauseMethod) + testPauseWhenScaleIn(t, kc) + 
testScaleIn(t, kc, unpauseMethod) + testBothPauseAnnotationActive(t, kc) + + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) + } } func getTemplateData() (templateData, []Template) { @@ -144,7 +147,7 @@ func getTemplateData() (templateData, []Template) { } func upsertScaledObjectPausedAnnotation(t assert.TestingT) { - _, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledobject/%s -n %s autoscaling.keda.sh/paused='true' --overwrite", scaledObjectName, testNamespace)) + _, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledobject/%s -n %s autoscaling.keda.sh/paused=true --overwrite", scaledObjectName, testNamespace)) assert.NoErrorf(t, err, "cannot execute command - %s", err) } @@ -152,6 +155,12 @@ func removeScaledObjectPausedAnnotation(t assert.TestingT) { _, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledobject/%s -n %s autoscaling.keda.sh/paused- --overwrite", scaledObjectName, testNamespace)) assert.NoErrorf(t, err, "cannot execute command - %s", err) } + +func setScaledObjectPausedAnnotationFalse(t assert.TestingT) { + _, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledobject/%s -n %s autoscaling.keda.sh/paused=false --overwrite", scaledObjectName, testNamespace)) + assert.NoErrorf(t, err, "cannot execute command - %s", err) +} + func upsertScaledObjectPausedReplicasAnnotation(t assert.TestingT, value int) { _, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledobject/%s -n %s autoscaling.keda.sh/paused-replicas=%d --overwrite", scaledObjectName, testNamespace, value)) assert.NoErrorf(t, err, "cannot execute command - %s", err) @@ -178,10 +187,10 @@ func testPauseWhenScaleOut(t *testing.T, kc *kubernetes.Clientset) { AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) } -func testScaleOut(t *testing.T, kc *kubernetes.Clientset) { +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, unpauseMethod func(assert.TestingT)) { t.Log("--- testing scale out ---") - removeScaledObjectPausedAnnotation(t) + unpauseMethod(t) KubernetesScaleDeployment(t, kc, monitoredDeploymentName, 5, testNamespace) assert.Truef(t, WaitForDeploymentReplicaReadyCount(t, kc, monitoredDeploymentName, testNamespace, 5, 60, testScaleOutWaitMin), @@ -209,10 +218,10 @@ func testPauseWhenScaleIn(t *testing.T, kc *kubernetes.Clientset) { "replica count should be 5 after %d minute(s)", testPauseAtNWaitMin) } -func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { +func testScaleIn(t *testing.T, kc *kubernetes.Clientset, unpauseMethod func(assert.TestingT)) { t.Log("--- testing scale in ---") - removeScaledObjectPausedAnnotation(t) + unpauseMethod(t) assert.Truef(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, testScaleInWaitMin), "replica count should be 0 after %d minutes", testScaleInWaitMin) } diff --git a/tests/run-all.go b/tests/run-all.go index 3c60c2c8e98..bf9029c5ff4 100644 --- a/tests/run-all.go +++ b/tests/run-all.go @@ -354,6 +354,7 @@ func printKedaLogs() { fmt.Println(operatorLogs) fmt.Println("##############################################") fmt.Println("##############################################") + saveLogToFile("keda-operator.log", operatorLogs) } msLogs, err := helper.FindPodLogs(kubeClient, "keda", "app=keda-metrics-apiserver", true) @@ -362,6 +363,7 @@ func printKedaLogs() { fmt.Println(msLogs) fmt.Println("##############################################") fmt.Println("##############################################") + 
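+ // besides printing to stdout, persist the logs to a file so they can be collected once the e2e run finishes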
saveLogToFile("keda-metrics-server.log", msLogs) } hooksLogs, err := helper.FindPodLogs(kubeClient, "keda", "app=keda-admission-webhooks", true) @@ -370,5 +372,20 @@ func printKedaLogs() { fmt.Println(hooksLogs) fmt.Println("##############################################") fmt.Println("##############################################") + saveLogToFile("keda-webhooks.log", hooksLogs) + } +} + +func saveLogToFile(file string, lines []string) { + f, err := os.Create(file) + if err != nil { + fmt.Print(err) + } + defer f.Close() + for _, line := range lines { + _, err := f.WriteString(line + "\n") + if err != nil { + fmt.Print(err) + } } } diff --git a/tests/scalers/aws/aws_cloudwatch_pod_identity/aws_cloudwatch_pod_identity_test.go b/tests/scalers/aws/aws_cloudwatch_pod_identity/aws_cloudwatch_pod_identity_test.go index 04697f58af3..12bba970c6a 100644 --- a/tests/scalers/aws/aws_cloudwatch_pod_identity/aws_cloudwatch_pod_identity_test.go +++ b/tests/scalers/aws/aws_cloudwatch_pod_identity/aws_cloudwatch_pod_identity_test.go @@ -51,7 +51,7 @@ metadata: namespace: {{.TestNamespace}} spec: podIdentity: - provider: aws-eks + provider: aws ` deploymentTemplate = ` diff --git a/tests/scalers/aws/aws_cloudwatch_pod_identity_eks/aws_cloudwatch_pod_identity_eks_test.go b/tests/scalers/aws/aws_cloudwatch_pod_identity_eks/aws_cloudwatch_pod_identity_eks_test.go new file mode 100644 index 00000000000..dd372b71adb --- /dev/null +++ b/tests/scalers/aws/aws_cloudwatch_pod_identity_eks/aws_cloudwatch_pod_identity_eks_test.go @@ -0,0 +1,225 @@ +//go:build e2e +// +build e2e + +package aws_cloudwatch_pod_identity_eks_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "aws-cloudwatch-pod-identity-eks-test" +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + SecretName string + AwsAccessKeyID string + AwsSecretAccessKey string + AwsRegion string + CloudWatchMetricName string + CloudWatchMetricNamespace string + CloudWatchMetricDimensionName string + CloudWatchMetricDimensionValue string +} + +const ( + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: keda-trigger-auth-aws-credentials + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: aws-eks +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged + ports: + - containerPort: 80 +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + maxReplicaCount: 2 + minReplicaCount: 0 + cooldownPeriod: 1 + triggers: + - type: aws-cloudwatch + authenticationRef: + name: keda-trigger-auth-aws-credentials + metadata: + awsRegion: {{.AwsRegion}} + namespace: {{.CloudWatchMetricNamespace}} + dimensionName: {{.CloudWatchMetricDimensionName}} + dimensionValue: {{.CloudWatchMetricDimensionValue}} + metricName: {{.CloudWatchMetricName}} + targetMetricValue: "1" + activationTargetMetricValue: "5" + minMetricValue: "0" + metricCollectionTime: "120" + metricStatPeriod: "30" + identityOwner: operator +` +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + secretName = fmt.Sprintf("%s-secret", testName) + cloudwatchMetricName = fmt.Sprintf("cw-identity-%d", GetRandomNumber()) + awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY") + awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY") + awsRegion = os.Getenv("TF_AWS_REGION") + cloudwatchMetricNamespace = "KEDA" + cloudwatchMetricDimensionName = "dimensionName" + cloudwatchMetricDimensionValue = "dimensionValue" + maxReplicaCount = 2 + minReplicaCount = 0 +) + +func TestCloudWatchScaler(t *testing.T) { + // setup cloudwatch + cloudwatchClient := createCloudWatchClient() + setCloudWatchCustomMetric(t, cloudwatchClient, 0) + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1), + "replica count should be %d after 1 minute", minReplicaCount) + + // test scaling + testActivation(t, kc, cloudwatchClient) + testScaleOut(t, kc, cloudwatchClient) + testScaleIn(t, kc, cloudwatchClient) + + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) + + setCloudWatchCustomMetric(t, cloudwatchClient, 0) +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, cloudwatchClient *cloudwatch.Client) { + t.Log("--- testing activation ---") 
+ setCloudWatchCustomMetric(t, cloudwatchClient, 3) + + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) +} + +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, cloudwatchClient *cloudwatch.Client) { + t.Log("--- testing scale out ---") + setCloudWatchCustomMetric(t, cloudwatchClient, 10) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", maxReplicaCount) +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset, cloudwatchClient *cloudwatch.Client) { + t.Log("--- testing scale in ---") + + setCloudWatchCustomMetric(t, cloudwatchClient, 0) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", minReplicaCount) +} + +func setCloudWatchCustomMetric(t *testing.T, cloudwatchClient *cloudwatch.Client, value float64) { + _, err := cloudwatchClient.PutMetricData(context.Background(), &cloudwatch.PutMetricDataInput{ + MetricData: []types.MetricDatum{ + { + MetricName: aws.String(cloudwatchMetricName), + Dimensions: []types.Dimension{ + { + Name: aws.String(cloudwatchMetricDimensionName), + Value: aws.String(cloudwatchMetricDimensionValue), + }, + }, + Unit: types.StandardUnitNone, + Value: aws.Float64(value), + }, + }, + Namespace: aws.String(cloudwatchMetricNamespace), + }) + assert.NoErrorf(t, err, "failed to set cloudwatch metric - %s", err) +} + +func createCloudWatchClient() *cloudwatch.Client { + configOptions := make([]func(*config.LoadOptions) error, 0) + configOptions = append(configOptions, config.WithRegion(awsRegion)) + cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...) 
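+ // the error from LoadDefaultConfig is ignored here; the static TF_AWS_* test credentials set below override the default credential chain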
+ cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKeyID, awsSecretAccessKey, "") + return cloudwatch.NewFromConfig(cfg) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + SecretName: secretName, + AwsAccessKeyID: base64.StdEncoding.EncodeToString([]byte(awsAccessKeyID)), + AwsSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(awsSecretAccessKey)), + AwsRegion: awsRegion, + CloudWatchMetricName: cloudwatchMetricName, + CloudWatchMetricNamespace: cloudwatchMetricNamespace, + CloudWatchMetricDimensionName: cloudwatchMetricDimensionName, + CloudWatchMetricDimensionValue: cloudwatchMetricDimensionValue, + }, []Template{ + {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + } +} diff --git a/tests/scalers/aws/aws_dynamodb_pod_identity/aws_dynamodb_pod_identity_test.go b/tests/scalers/aws/aws_dynamodb_pod_identity/aws_dynamodb_pod_identity_test.go index ab5cc6cd7d5..aea9e215e76 100644 --- a/tests/scalers/aws/aws_dynamodb_pod_identity/aws_dynamodb_pod_identity_test.go +++ b/tests/scalers/aws/aws_dynamodb_pod_identity/aws_dynamodb_pod_identity_test.go @@ -53,7 +53,7 @@ metadata: namespace: {{.TestNamespace}} spec: podIdentity: - provider: aws-eks + provider: aws ` deploymentTemplate = ` diff --git a/tests/scalers/aws/aws_dynamodb_pod_identity_eks/aws_dynamodb_pod_identity_eks_test.go b/tests/scalers/aws/aws_dynamodb_pod_identity_eks/aws_dynamodb_pod_identity_eks_test.go new file mode 100644 index 00000000000..70a9c43a27b --- /dev/null +++ b/tests/scalers/aws/aws_dynamodb_pod_identity_eks/aws_dynamodb_pod_identity_eks_test.go @@ -0,0 +1,277 @@ +//go:build e2e +// +build e2e + +package aws_dynamodb_pod_identity_eks_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "strconv" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "aws-dynamodb-pod-identity-eks-test" +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + SecretName string + AwsAccessKeyID string + AwsSecretAccessKey string + AwsRegion string + DynamoDBTableName string + ExpressionAttributeNames string + KeyConditionExpression string + ExpressionAttributeValues string +} + +const ( + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: keda-trigger-auth-aws-credentials + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: aws-eks +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged + ports: + - containerPort: 80 +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + maxReplicaCount: 2 + minReplicaCount: 0 + cooldownPeriod: 1 + triggers: + - type: aws-dynamodb + authenticationRef: + name: keda-trigger-auth-aws-credentials + metadata: + awsRegion: {{.AwsRegion}} + tableName: {{.DynamoDBTableName}} + expressionAttributeNames: '{{.ExpressionAttributeNames}}' + keyConditionExpression: '{{.KeyConditionExpression}}' + expressionAttributeValues: '{{.ExpressionAttributeValues}}' + targetValue: '1' + activationTargetValue: '4' + identityOwner: operator +` +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + secretName = fmt.Sprintf("%s-secret", testName) + dynamoDBTableName = fmt.Sprintf("table-identity-%d", GetRandomNumber()) + awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY") + awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY") + awsRegion = os.Getenv("TF_AWS_REGION") + expressionAttributeNames = "{ \"#k\" : \"event_type\"}" + keyConditionExpression = "#k = :key" + expressionAttributeValues = "{ \":key\" : {\"S\":\"scaling_event\"}}" + maxReplicaCount = 2 + minReplicaCount = 0 +) + +func TestDynamoDBScaler(t *testing.T) { + // setup dynamodb + dynamodbClient := createDynamoDBClient() + createDynamoDBTable(t, dynamodbClient) + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1), + "replica count should be %d after 1 minute", minReplicaCount) + + // test scaling + testActivation(t, kc, dynamodbClient) + testScaleOut(t, kc, dynamodbClient) + testScaleIn(t, kc, dynamodbClient) + + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) + cleanupTable(t, dynamodbClient) +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, dynamodbClient *dynamodb.Client) { + t.Log("--- testing activation ---") + addMessages(t, dynamodbClient, 3) + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, 
deploymentName, testNamespace, minReplicaCount, 60) +} + +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, dynamodbClient *dynamodb.Client) { + t.Log("--- testing scale out ---") + addMessages(t, dynamodbClient, 6) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", maxReplicaCount) +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset, dynamodbClient *dynamodb.Client) { + t.Log("--- testing scale in ---") + + for i := 0; i < 6; i++ { + _, err := dynamodbClient.DeleteItem(context.Background(), &dynamodb.DeleteItemInput{ + TableName: aws.String(dynamoDBTableName), + Key: map[string]types.AttributeValue{ + "event_type": &types.AttributeValueMemberS{ + Value: "scaling_event", + }, + "event_id": &types.AttributeValueMemberS{ + Value: strconv.Itoa(i), + }, + }, + }) + assert.NoErrorf(t, err, "failed to delete item - %s", err) + } + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", minReplicaCount) +} + +func addMessages(t *testing.T, dynamodbClient *dynamodb.Client, messages int) { + for i := 0; i < messages; i++ { + _, err := dynamodbClient.PutItem(context.Background(), &dynamodb.PutItemInput{ + TableName: aws.String(dynamoDBTableName), + Item: map[string]types.AttributeValue{ + "event_type": &types.AttributeValueMemberS{ + Value: "scaling_event", + }, + "event_id": &types.AttributeValueMemberS{ + Value: strconv.Itoa(i), + }, + }, + }) + t.Log("Message enqueued") + assert.NoErrorf(t, err, "failed to create item - %s", err) + } +} + +func createDynamoDBTable(t *testing.T, dynamodbClient *dynamodb.Client) { + _, err := dynamodbClient.CreateTable(context.Background(), &dynamodb.CreateTableInput{ + TableName: aws.String(dynamoDBTableName), + KeySchema: []types.KeySchemaElement{ + {AttributeName: aws.String("event_type"), KeyType: types.KeyTypeHash}, + {AttributeName: aws.String("event_id"), KeyType: types.KeyTypeRange}, + }, + AttributeDefinitions: []types.AttributeDefinition{ + {AttributeName: aws.String("event_type"), AttributeType: types.ScalarAttributeTypeS}, + {AttributeName: aws.String("event_id"), AttributeType: types.ScalarAttributeTypeS}, + }, + ProvisionedThroughput: &types.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(5), + WriteCapacityUnits: aws.Int64(5), + }, + }) + assert.NoErrorf(t, err, "failed to create table - %s", err) + done := waitForTableActiveStatus(t, dynamodbClient) + assert.True(t, done, "dynamodb table never reached ACTIVE status") +} + +func waitForTableActiveStatus(t *testing.T, dynamodbClient *dynamodb.Client) bool { + for i := 0; i < 30; i++ { + describe, _ := dynamodbClient.DescribeTable(context.Background(), &dynamodb.DescribeTableInput{ + TableName: aws.String(dynamoDBTableName), + }) + t.Logf("Waiting for table ACTIVE status. 
current status - %s", describe.Table.TableStatus) + if describe.Table.TableStatus == "ACTIVE" { + return true + } + time.Sleep(time.Second * 2) + } + return false +} + +func cleanupTable(t *testing.T, dynamodbClient *dynamodb.Client) { + t.Log("--- cleaning up ---") + _, err := dynamodbClient.DeleteTable(context.Background(), &dynamodb.DeleteTableInput{ + TableName: aws.String(dynamoDBTableName), + }) + assert.NoErrorf(t, err, "cannot delete stream - %s", err) +} + +func createDynamoDBClient() *dynamodb.Client { + configOptions := make([]func(*config.LoadOptions) error, 0) + configOptions = append(configOptions, config.WithRegion(awsRegion)) + cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...) + cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKeyID, awsSecretAccessKey, "") + return dynamodb.NewFromConfig(cfg) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + SecretName: secretName, + AwsAccessKeyID: base64.StdEncoding.EncodeToString([]byte(awsAccessKeyID)), + AwsSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(awsSecretAccessKey)), + AwsRegion: awsRegion, + DynamoDBTableName: dynamoDBTableName, + ExpressionAttributeNames: expressionAttributeNames, + KeyConditionExpression: keyConditionExpression, + ExpressionAttributeValues: expressionAttributeValues, + }, []Template{ + {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + } +} diff --git a/tests/scalers/aws/aws_dynamodb_streams_pod_identity/aws_dynamodb_streams_pod_identity_test.go b/tests/scalers/aws/aws_dynamodb_streams_pod_identity/aws_dynamodb_streams_pod_identity_test.go index f6e937f62ff..ad2b93db98b 100644 --- a/tests/scalers/aws/aws_dynamodb_streams_pod_identity/aws_dynamodb_streams_pod_identity_test.go +++ b/tests/scalers/aws/aws_dynamodb_streams_pod_identity/aws_dynamodb_streams_pod_identity_test.go @@ -91,7 +91,7 @@ metadata: namespace: {{.TestNamespace}} spec: podIdentity: - provider: aws-eks + provider: aws ` scaledObjectTemplate = ` diff --git a/tests/scalers/aws/aws_dynamodb_streams_pod_identity_eks/aws_dynamodb_streams_pod_identity_eks_test.go b/tests/scalers/aws/aws_dynamodb_streams_pod_identity_eks/aws_dynamodb_streams_pod_identity_eks_test.go new file mode 100644 index 00000000000..3d000d0db1d --- /dev/null +++ b/tests/scalers/aws/aws_dynamodb_streams_pod_identity_eks/aws_dynamodb_streams_pod_identity_eks_test.go @@ -0,0 +1,294 @@ +//go:build e2e +// +build e2e + +package aws_dynamodb_streams_pod_identity_eks_test + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "os" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + dynamodbTypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/aws-sdk-go-v2/service/dynamodbstreams" + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "aws-dynamodb-streams-pod-identity-eks-test" +) + +var ( + awsRegion = os.Getenv("TF_AWS_REGION") + awsAccessKey = os.Getenv("TF_AWS_ACCESS_KEY") + awsSecretKey = os.Getenv("TF_AWS_SECRET_KEY") + testNamespace = fmt.Sprintf("%s-ns", testName) + secretName = fmt.Sprintf("%s-secret", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + tableName = fmt.Sprintf("stream-identity-%d", GetRandomNumber()) + shardCount = 2 // default count + activationShardCount = 0 // default count +) + +type templateData struct { + TestNamespace string + SecretName string + AwsRegion string + AwsAccessKey string + AwsSecretKey string + DeploymentName string + TriggerAuthName string + ScaledObjectName string + TableName string + ShardCount int64 + ActivationShardCount int64 +} + +const ( + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged +` + + triggerAuthTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: aws-eks +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + deploymentName: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + maxReplicaCount: 2 + minReplicaCount: 0 + pollingInterval: 5 # Optional. Default: 30 seconds + cooldownPeriod: 1 # Optional. Default: 300 seconds + triggers: + - type: aws-dynamodb-streams + authenticationRef: + name: {{.TriggerAuthName}} + metadata: + awsRegion: {{.AwsRegion}} # Required + tableName: {{.TableName}} # Required + shardCount: "{{.ShardCount}}" # Optional. Default: 2 + activationShardCount: "{{.ActivationShardCount}}" # Optional. 
Default: 0 + identityOwner: operator +` +) + +func TestScaler(t *testing.T) { + t.Log("--- setting up ---") + require.NotEmpty(t, awsAccessKey, "TF_AWS_ACCESS_KEY env variable is required for dynamodb streams tests") + require.NotEmpty(t, awsSecretKey, "TF_AWS_SECRET_KEY env variable is required for dynamodb streams tests") + data, templates := getTemplateData() + + // Create DynamoDB table and get the latest stream ARN for the table + dbClient, dbStreamsClient := setupDynamoDBStreams(t) + streamArn, err := getLatestStreamArn(dbClient) + assert.NoErrorf(t, err, "cannot get latest stream arn for the table - %s", err) + time.Sleep(10 * time.Second) + + // Get Shard Count + shardCount, err := getDynamoDBStreamShardCount(dbStreamsClient, streamArn) + assert.True(t, shardCount >= 2, "dynamodb stream shard count should be 2 or higher - %s", err) + + // Deploy nginx, secret, and triggerAuth + kc := GetKubernetesClient(t) + CreateNamespace(t, kc, testNamespace) + KubectlApplyWithTemplate(t, data, "deploymentTemplate", deploymentTemplate) + KubectlApplyWithTemplate(t, data, "triggerAuthTemplate", triggerAuthTemplate) + + // Wait for nginx to load + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 30, 3), + "replica count should start out as 0") + + // test scaling + testActivation(t, kc, data) + testScaleOut(t, kc, data, shardCount) + testScaleIn(t, kc, data, shardCount) + + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) + cleanupDynamoDBTable(t, dbClient) +} + +func setupDynamoDBStreams(t *testing.T) (*dynamodb.Client, *dynamodbstreams.Client) { + var dbClient *dynamodb.Client + var dbStreamClient *dynamodbstreams.Client + + configOptions := make([]func(*config.LoadOptions) error, 0) + configOptions = append(configOptions, config.WithRegion(awsRegion)) + cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...) 
+ cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKey, awsSecretKey, "") + + dbClient = dynamodb.NewFromConfig(cfg) + dbStreamClient = dynamodbstreams.NewFromConfig(cfg) + + err := createTable(dbClient) + assert.NoErrorf(t, err, "cannot create dynamodb table - %s", err) + + return dbClient, dbStreamClient +} + +func createTable(db *dynamodb.Client) error { + keySchema := []dynamodbTypes.KeySchemaElement{ + { + AttributeName: aws.String("id"), + KeyType: dynamodbTypes.KeyTypeHash, + }, + } + attributeDefinitions := []dynamodbTypes.AttributeDefinition{ + { + AttributeName: aws.String("id"), + AttributeType: dynamodbTypes.ScalarAttributeTypeS, + }, + } + streamSpecification := &dynamodbTypes.StreamSpecification{ + StreamEnabled: aws.Bool(true), + StreamViewType: dynamodbTypes.StreamViewTypeNewImage, + } + _, err := db.CreateTable(context.Background(), &dynamodb.CreateTableInput{ + TableName: &tableName, + KeySchema: keySchema, + AttributeDefinitions: attributeDefinitions, + BillingMode: dynamodbTypes.BillingModePayPerRequest, + StreamSpecification: streamSpecification, + }) + return err +} + +func getLatestStreamArn(db *dynamodb.Client) (*string, error) { + input := dynamodb.DescribeTableInput{ + TableName: &tableName, + } + tableInfo, err := db.DescribeTable(context.Background(), &input) + if err != nil { + return nil, err + } + if nil == tableInfo.Table.LatestStreamArn { + return nil, errors.New("empty table stream arn") + } + return tableInfo.Table.LatestStreamArn, nil +} + +func getDynamoDBStreamShardCount(dbs *dynamodbstreams.Client, streamArn *string) (int64, error) { + input := dynamodbstreams.DescribeStreamInput{ + StreamArn: streamArn, + } + des, err := dbs.DescribeStream(context.Background(), &input) + if err != nil { + return -1, err + } + return int64(len(des.StreamDescription.Shards)), nil +} + +func getTemplateData() (templateData, []Template) { + base64AwsAccessKey := base64.StdEncoding.EncodeToString([]byte(awsAccessKey)) + base64AwsSecretKey := base64.StdEncoding.EncodeToString([]byte(awsSecretKey)) + + return templateData{ + TestNamespace: testNamespace, + SecretName: secretName, + AwsRegion: awsRegion, + AwsAccessKey: base64AwsAccessKey, + AwsSecretKey: base64AwsSecretKey, + DeploymentName: deploymentName, + TriggerAuthName: triggerAuthName, + ScaledObjectName: scaledObjectName, + TableName: tableName, + ShardCount: int64(shardCount), + }, []Template{ + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "triggerAuthTemplate", Config: triggerAuthTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + } +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing activation ---") + data.ActivationShardCount = 10 + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) +} + +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData, shardCount int64) { + t.Log("--- testing scale out ---") + // Deploy scalerObject with its target shardCount = the current dynamodb streams shard count and check if replicas scale out to 1 + t.Log("replicas should scale out to 1") + data.ShardCount = shardCount + data.ActivationShardCount = int64(activationShardCount) + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 180, 1), + "replica count 
should increase to 1") + + // Deploy scalerObject with its shardCount = 1 and check if replicas scale out to 2 (maxReplicaCount) + t.Log("then, replicas should scale out to 2") + data.ShardCount = 1 + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 180, 1), + "replica count should increase to 2") +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData, shardCount int64) { + t.Log("--- testing scale in ---") + // Deploy scalerObject with its target shardCount = the current dynamodb streams shard count and check if replicas scale in to 1 + data.ShardCount = shardCount + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 330, 1), + "replica count should decrease to 1 in 330 seconds") +} + +func cleanupDynamoDBTable(t *testing.T, db *dynamodb.Client) { + t.Log("--- cleaning up ---") + _, err := db.DeleteTable(context.Background(), + &dynamodb.DeleteTableInput{ + TableName: &tableName, + }) + assert.NoErrorf(t, err, "cannot delete dynamodb table - %s", err) +} diff --git a/tests/scalers/aws/aws_kinesis_stream_pod_identity/aws_kinesis_stream_pod_identity_test.go b/tests/scalers/aws/aws_kinesis_stream_pod_identity/aws_kinesis_stream_pod_identity_test.go index 69bfd38de08..49c3fde5c26 100644 --- a/tests/scalers/aws/aws_kinesis_stream_pod_identity/aws_kinesis_stream_pod_identity_test.go +++ b/tests/scalers/aws/aws_kinesis_stream_pod_identity/aws_kinesis_stream_pod_identity_test.go @@ -49,7 +49,7 @@ metadata: namespace: {{.TestNamespace}} spec: podIdentity: - provider: aws-eks + provider: aws ` deploymentTemplate = ` diff --git a/tests/scalers/aws/aws_kinesis_stream_pod_identity_eks/aws_kinesis_stream_pod_identity_eks_test.go b/tests/scalers/aws/aws_kinesis_stream_pod_identity_eks/aws_kinesis_stream_pod_identity_eks_test.go new file mode 100644 index 00000000000..245770f98e6 --- /dev/null +++ b/tests/scalers/aws/aws_kinesis_stream_pod_identity_eks/aws_kinesis_stream_pod_identity_eks_test.go @@ -0,0 +1,239 @@ +//go:build e2e +// +build e2e + +package aws_kinesis_stream_pod_identity_eks_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "aws-kinesis-stream-pod-identity-eks-test" +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + SecretName string + AwsAccessKeyID string + AwsSecretAccessKey string + AwsRegion string + KinesisStream string +} + +const ( + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: keda-trigger-auth-aws-credentials + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: aws-eks +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged + ports: + - containerPort: 80 +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + maxReplicaCount: 2 + minReplicaCount: 0 + cooldownPeriod: 1 + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 15 + triggers: + - type: aws-kinesis-stream + authenticationRef: + name: keda-trigger-auth-aws-credentials + metadata: + awsRegion: {{.AwsRegion}} + streamName: {{.KinesisStream}} + shardCount: "3" + activationShardCount: "4" + identityOwner: operator +` +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + secretName = fmt.Sprintf("%s-secret", testName) + kinesisStreamName = fmt.Sprintf("kinesis-identity-%d", GetRandomNumber()) + awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY") + awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY") + awsRegion = os.Getenv("TF_AWS_REGION") + maxReplicaCount = 2 + minReplicaCount = 0 +) + +func TestKiensisScaler(t *testing.T) { + // setup kinesis + kinesisClient := createKinesisClient() + createKinesisStream(t, kinesisClient) + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1), + "replica count should be %d after 1 minute", minReplicaCount) + + // test scaling + testActivation(t, kc, kinesisClient) + testScaleOut(t, kc, kinesisClient) + testScaleIn(t, kc, kinesisClient) + + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) + cleanupStream(t, kinesisClient) +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, kinesisClient *kinesis.Client) { + t.Log("--- testing activation ---") + updateShardCount(t, kinesisClient, 3) + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) +} + +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, kinesisClient *kinesis.Client) { + t.Log("--- testing scale out ---") + updateShardCount(t, kinesisClient, 6) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), + "replica count 
should be %d after 3 minutes", maxReplicaCount) +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset, kinesisClient *kinesis.Client) { + t.Log("--- testing scale in ---") + updateShardCount(t, kinesisClient, 3) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", minReplicaCount) +} + +func updateShardCount(t *testing.T, kinesisClient *kinesis.Client, shardCount int64) { + done := waitForStreamActiveStatus(t, kinesisClient) + if done { + _, err := kinesisClient.UpdateShardCount(context.Background(), &kinesis.UpdateShardCountInput{ + StreamName: &kinesisStreamName, + TargetShardCount: aws.Int32(int32(shardCount)), + ScalingType: types.ScalingTypeUniformScaling, + }) + assert.NoErrorf(t, err, "cannot update shard count - %s", err) + } + assert.True(t, done, "failed to update shard count, stream never reached ACTIVE status") +} + +func createKinesisStream(t *testing.T, kinesisClient *kinesis.Client) { + _, err := kinesisClient.CreateStream(context.Background(), &kinesis.CreateStreamInput{ + StreamName: &kinesisStreamName, + ShardCount: aws.Int32(2), + }) + assert.NoErrorf(t, err, "failed to create stream - %s", err) + done := waitForStreamActiveStatus(t, kinesisClient) + assert.True(t, done, "kinesis stream never reached ACTIVE status") +} + +func waitForStreamActiveStatus(t *testing.T, kinesisClient *kinesis.Client) bool { + for i := 0; i < 30; i++ { + describe, _ := kinesisClient.DescribeStream(context.Background(), &kinesis.DescribeStreamInput{ + StreamName: &kinesisStreamName, + }) + t.Logf("Waiting for stream ACTIVE status. current status - %s", describe.StreamDescription.StreamStatus) + if describe.StreamDescription.StreamStatus == "ACTIVE" { + return true + } + time.Sleep(time.Second * 2) + } + return false +} + +func cleanupStream(t *testing.T, kinesisClient *kinesis.Client) { + t.Log("--- cleaning up ---") + _, err := kinesisClient.DeleteStream(context.Background(), &kinesis.DeleteStreamInput{ + StreamName: &kinesisStreamName, + }) + assert.NoErrorf(t, err, "cannot delete stream - %s", err) +} + +func createKinesisClient() *kinesis.Client { + configOptions := make([]func(*config.LoadOptions) error, 0) + configOptions = append(configOptions, config.WithRegion(awsRegion)) + cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...) 
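+ // note: LoadDefaultConfig's error is dropped; as in the other AWS e2e suites, the test relies on the static credentials injected below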
+ cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKeyID, awsSecretAccessKey, "") + return kinesis.NewFromConfig(cfg) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + SecretName: secretName, + AwsAccessKeyID: base64.StdEncoding.EncodeToString([]byte(awsAccessKeyID)), + AwsSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(awsSecretAccessKey)), + AwsRegion: awsRegion, + KinesisStream: kinesisStreamName, + }, []Template{ + {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + } +} diff --git a/tests/scalers/aws/aws_sqs_queue_pod_identity/aws_sqs_queue_pod_identity_test.go b/tests/scalers/aws/aws_sqs_queue_pod_identity/aws_sqs_queue_pod_identity_test.go index 08f2a0b4dab..a47c20989a4 100644 --- a/tests/scalers/aws/aws_sqs_queue_pod_identity/aws_sqs_queue_pod_identity_test.go +++ b/tests/scalers/aws/aws_sqs_queue_pod_identity/aws_sqs_queue_pod_identity_test.go @@ -47,7 +47,7 @@ metadata: namespace: {{.TestNamespace}} spec: podIdentity: - provider: aws-eks + provider: aws ` deploymentTemplate = ` diff --git a/tests/scalers/aws/aws_sqs_queue_pod_identity_eks/aws_sqs_queue_pod_identity_eks_test.go b/tests/scalers/aws/aws_sqs_queue_pod_identity_eks/aws_sqs_queue_pod_identity_eks_test.go new file mode 100644 index 00000000000..0288c0a169f --- /dev/null +++ b/tests/scalers/aws/aws_sqs_queue_pod_identity_eks/aws_sqs_queue_pod_identity_eks_test.go @@ -0,0 +1,219 @@ +//go:build e2e +// +build e2e + +package aws_sqs_queue_pod_identity_eks_test + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "aws-sqs-queue-pod-identity-eks-test" +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + SecretName string + AwsAccessKeyID string + AwsSecretAccessKey string + AwsRegion string + SqsQueue string +} + +const ( + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: keda-trigger-auth-aws-credentials + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: aws-eks +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged + ports: + - containerPort: 80 +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + maxReplicaCount: 2 + minReplicaCount: 0 + cooldownPeriod: 1 + triggers: + - type: aws-sqs-queue + authenticationRef: + name: keda-trigger-auth-aws-credentials + metadata: + awsRegion: {{.AwsRegion}} + queueURL: {{.SqsQueue}} + queueLength: "1" + activationQueueLength: "5" + identityOwner: operator +` +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + secretName = fmt.Sprintf("%s-secret", testName) + sqsQueueName = fmt.Sprintf("queue-identity-%d", GetRandomNumber()) + awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY") + awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY") + awsRegion = os.Getenv("TF_AWS_REGION") + maxReplicaCount = 2 + minReplicaCount = 0 +) + +func TestSqsScaler(t *testing.T) { + // setup SQS + sqsClient := createSqsClient() + queue := createSqsQueue(t, sqsClient) + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData(*queue.QueueUrl) + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1), + "replica count should be 0 after 1 minute") + + // test scaling + testActivation(t, kc, sqsClient, queue.QueueUrl) + testScaleOut(t, kc, sqsClient, queue.QueueUrl) + testScaleIn(t, kc, sqsClient, queue.QueueUrl) + + // cleanup + DeleteKubernetesResources(t, testNamespace, data, templates) + cleanupQueue(t, sqsClient, queue.QueueUrl) +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, sqsClient *sqs.Client, queueURL *string) { + t.Log("--- testing activation ---") + addMessages(t, sqsClient, queueURL, 4) + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) +} + +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, sqsClient *sqs.Client, queueURL *string) { + t.Log("--- testing scale out ---") + addMessages(t, sqsClient, queueURL, 6) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 180, 1), + "replica count should be 2 after 3 minutes") +} + +func testScaleIn(t *testing.T, kc 
*kubernetes.Clientset, sqsClient *sqs.Client, queueURL *string) { + t.Log("--- testing scale in ---") + _, err := sqsClient.PurgeQueue(context.Background(), &sqs.PurgeQueueInput{ + QueueUrl: queueURL, + }) + assert.NoErrorf(t, err, "cannot clear queue - %s", err) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 180, 1), + "replica count should be 0 after 3 minutes") +} + +func addMessages(t *testing.T, sqsClient *sqs.Client, queueURL *string, messages int) { + for i := 0; i < messages; i++ { + msg := fmt.Sprintf("Message - %d", i) + _, err := sqsClient.SendMessage(context.Background(), &sqs.SendMessageInput{ + QueueUrl: queueURL, + MessageBody: aws.String(msg), + DelaySeconds: 10, + }) + assert.NoErrorf(t, err, "cannot send message - %s", err) + } +} + +func createSqsQueue(t *testing.T, sqsClient *sqs.Client) *sqs.CreateQueueOutput { + queue, err := sqsClient.CreateQueue(context.Background(), &sqs.CreateQueueInput{ + QueueName: &sqsQueueName, + Attributes: map[string]string{ + "DelaySeconds": "60", + "MessageRetentionPeriod": "86400", + }}) + assert.NoErrorf(t, err, "failed to create queue - %s", err) + return queue +} + +func cleanupQueue(t *testing.T, sqsClient *sqs.Client, queueURL *string) { + t.Log("--- cleaning up ---") + _, err := sqsClient.DeleteQueue(context.Background(), &sqs.DeleteQueueInput{ + QueueUrl: queueURL, + }) + assert.NoErrorf(t, err, "cannot delete queue - %s", err) +} + +func createSqsClient() *sqs.Client { + configOptions := make([]func(*config.LoadOptions) error, 0) + configOptions = append(configOptions, config.WithRegion(awsRegion)) + cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...) + cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKeyID, awsSecretAccessKey, "") + return sqs.NewFromConfig(cfg) +} + +func getTemplateData(sqsQueue string) (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + SecretName: secretName, + AwsAccessKeyID: base64.StdEncoding.EncodeToString([]byte(awsAccessKeyID)), + AwsSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(awsSecretAccessKey)), + AwsRegion: awsRegion, + SqsQueue: sqsQueue, + }, []Template{ + {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, + } +} diff --git a/tests/scalers/azure/azure_managed_prometheus/helper/helper.go b/tests/scalers/azure/azure_managed_prometheus/helper/helper.go index 9875cb8a89b..179cce6a6bd 100644 --- a/tests/scalers/azure/azure_managed_prometheus/helper/helper.go +++ b/tests/scalers/azure/azure_managed_prometheus/helper/helper.go @@ -169,7 +169,7 @@ spec: template: spec: containers: - - image: quay.io/zroubalik/hey + - image: ghcr.io/kedacore/tests-hey name: test command: ["/bin/sh"] args: ["-c", "for i in $(seq 1 60);do echo $i;/hey -c 5 -n 30 http://{{.MonitoredAppName}}.{{.TestNamespace}}.svc;sleep 1;done"] @@ -195,7 +195,7 @@ spec: template: spec: containers: - - image: quay.io/zroubalik/hey + - image: ghcr.io/kedacore/tests-hey name: test command: ["/bin/sh"] args: ["-c", "for i in $(seq 1 60);do echo $i;/hey -c 5 -n 80 http://{{.MonitoredAppName}}.{{.TestNamespace}}.svc;sleep 1;done"] diff --git a/tests/scalers/loki/loki_test.go b/tests/scalers/loki/loki_test.go index 6049dc03da7..4d32072f724 100644 --- a/tests/scalers/loki/loki_test.go 
+++ b/tests/scalers/loki/loki_test.go @@ -97,7 +97,7 @@ spec: template: spec: containers: - - image: quay.io/zroubalik/hey + - image: ghcr.io/kedacore/tests-hey name: test command: ["/bin/sh"] args: ["-c", "for i in $(seq 1 30);do echo \"keda-scaler $i\";sleep 1;done"] @@ -123,7 +123,7 @@ spec: template: spec: containers: - - image: quay.io/zroubalik/hey + - image: ghcr.io/kedacore/tests-hey name: test command: ["/bin/sh"] args: ["-c", "for i in $(seq 1 30);do echo \"keda-scaler $i\";echo \"keda-scaler $((i*2))\";sleep 1;done"] diff --git a/tests/scalers/prometheus/prometheus_test.go b/tests/scalers/prometheus/prometheus_test.go index 98ed4560698..c379bee6fd9 100644 --- a/tests/scalers/prometheus/prometheus_test.go +++ b/tests/scalers/prometheus/prometheus_test.go @@ -161,7 +161,7 @@ spec: template: spec: containers: - - image: quay.io/zroubalik/hey + - image: ghcr.io/kedacore/tests-hey name: test command: ["/bin/sh"] args: ["-c", "for i in $(seq 1 60);do echo $i;/hey -c 5 -n 30 http://{{.MonitoredAppName}}.{{.TestNamespace}}.svc;sleep 1;done"] @@ -187,7 +187,7 @@ spec: template: spec: containers: - - image: quay.io/zroubalik/hey + - image: ghcr.io/kedacore/tests-hey name: test command: ["/bin/sh"] args: ["-c", "for i in $(seq 1 60);do echo $i;/hey -c 5 -n 80 http://{{.MonitoredAppName}}.{{.TestNamespace}}.svc;sleep 1;done"] diff --git a/tests/secret-providers/aws_identity_assume_role/aws_identity_assume_role_test.go b/tests/secret-providers/aws_identity_assume_role/aws_identity_assume_role_test.go new file mode 100644 index 00000000000..717db83a209 --- /dev/null +++ b/tests/secret-providers/aws_identity_assume_role/aws_identity_assume_role_test.go @@ -0,0 +1,327 @@ +//go:build e2e +// +build e2e + +package aws_identity_assume_role_test + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../../.env") + +const ( + testName = "aws-identity-assume-role-test" +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + TriggerAuthenticationName string + SecretName string + AwsRegion string + RoleArn string + SqsQueue string +} + +const ( + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthenticationName}} + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: aws +` + + triggerAuthTemplateWithRoleArn = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthenticationName}} + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: aws + roleArn: {{.RoleArn}} +` + + triggerAuthTemplateWithIdentityOwner = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthenticationName}} + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: aws + identityOwner: workload +` + + serviceAccountTemplate = `apiVersion: v1 +kind: ServiceAccount +metadata: + name: workload + namespace: {{.TestNamespace}} + annotations: + eks.amazonaws.com/role-arn: {{.RoleArn}} +` + + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + serviceAccountName: workload + containers: + - name: nginx + image: nginxinc/nginx-unprivileged + ports: + - containerPort: 80 +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + maxReplicaCount: 1 + minReplicaCount: 0 + pollingInterval: 5 + cooldownPeriod: 1 + triggers: + - type: aws-sqs-queue + authenticationRef: + name: {{.TriggerAuthenticationName}} + metadata: + awsRegion: {{.AwsRegion}} + queueURL: {{.SqsQueue}} + queueLength: "1" +` +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + secretName = fmt.Sprintf("%s-secret", testName) + sqsWorkload1QueueName = fmt.Sprintf("assume-role-workload1-queue-%d", GetRandomNumber()) + sqsWorkload2QueueName = fmt.Sprintf("assume-role-workload2-queue-%d", GetRandomNumber()) + awsAccessKeyID = os.Getenv("TF_AWS_ACCESS_KEY") + awsSecretAccessKey = os.Getenv("TF_AWS_SECRET_KEY") + awsRegion = os.Getenv("TF_AWS_REGION") + awsWorkload1RoleArn = os.Getenv("TF_AWS_WORKLOAD1_ROLE") + awsWorkload2RoleArn = fmt.Sprintf("arn:aws:iam::%s:role/workload-2-%s-role", os.Getenv("TF_AWS_ACCOUNT_ID"), os.Getenv("TEST_CLUSTER_NAME")) + maxReplicaCount = 1 + minReplicaCount = 0 + sqsMessageCount = 2 +) + +func TestSqsScaler(t *testing.T) { + // setup SQS + sqsClient := createSqsClient() + queueWorkload1 := createSqsQueue(t, sqsWorkload1QueueName, sqsClient) + queueWorkload2 := createSqsQueue(t, sqsWorkload2QueueName, sqsClient) + + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData(*queueWorkload1.QueueUrl) + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + assert.True(t, 
WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1),
+		"replica count should be 0 after 1 minute")
+
+	// test scaling using KEDA identity
+	testScaleWithKEDAIdentity(t, kc, data, sqsClient, queueWorkload1.QueueUrl)
+	// test scaling using correct identity provided via podIdentity.RoleArn
+	// for a role that can be assumed
+	testScaleWithExplicitRoleArnUsingRoleAssumption(t, kc, data, sqsClient, queueWorkload1.QueueUrl)
+	// test scaling using correct identity provided via podIdentity.RoleArn
+	// for a role to be used with web identity (workload-2 role allows it)
+	testScaleWithExplicitRoleArnUsingWebIdentityRole(t, kc, data, sqsClient, queueWorkload2.QueueUrl)
+	// test scaling using correct identity provided via workload
+	testScaleWithWorkloadArn(t, kc, data, sqsClient, queueWorkload1.QueueUrl)
+
+	// cleanup
+	DeleteKubernetesResources(t, testNamespace, data, templates)
+	cleanupQueue(t, sqsClient, queueWorkload1.QueueUrl)
+	cleanupQueue(t, sqsClient, queueWorkload2.QueueUrl)
+}
+
+// testScaleWithKEDAIdentity checks that we don't scale out because KEDA's own identity
+// doesn't have access to the queue, so even though there are messages, the workload
+// won't scale
+func testScaleWithKEDAIdentity(t *testing.T, kc *kubernetes.Clientset, data templateData, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- testing scaling out with KEDA role ---")
+	data.ScaledObjectName = "scale-with-keda-identity"
+	data.TriggerAuthenticationName = "scale-with-keda-identity"
+	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	KubectlApplyWithTemplate(t, data, "triggerAuthTemplate", triggerAuthenticationTemplate)
+	addMessages(t, sqsClient, queueURL, sqsMessageCount)
+	// replicas shouldn't change
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
+	testScaleIn(t, kc, sqsClient, queueURL)
+	KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	KubectlDeleteWithTemplate(t, data, "triggerAuthTemplate", triggerAuthenticationTemplate)
+}
+
+func testScaleWithExplicitRoleArnUsingRoleAssumption(t *testing.T, kc *kubernetes.Clientset, data templateData, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- testing scaling out with explicit arn role with role assumption ---")
+	data.ScaledObjectName = "scale-using-role-assumption"
+	data.TriggerAuthenticationName = "scale-using-role-assumption"
+	KubectlApplyWithTemplate(t, data, "triggerAuthTemplateWithRoleArn", triggerAuthTemplateWithRoleArn)
+	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	addMessages(t, sqsClient, queueURL, sqsMessageCount)
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 180, 1),
+		"replica count should be %d after 3 minutes", maxReplicaCount)
+	testScaleIn(t, kc, sqsClient, queueURL)
+	KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	KubectlDeleteWithTemplate(t, data, "triggerAuthTemplateWithRoleArn", triggerAuthTemplateWithRoleArn)
+}
+
+func testScaleWithExplicitRoleArnUsingWebIdentityRole(t *testing.T, kc *kubernetes.Clientset, data templateData, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- testing scaling out with explicit arn role with web identity role ---")
+	data.RoleArn = awsWorkload2RoleArn
+	data.SqsQueue = *queueURL
+	data.ScaledObjectName = "scale-using-web-identity"
+	data.TriggerAuthenticationName = "scale-using-web-identity"
+	KubectlApplyWithTemplate(t, data, "triggerAuthTemplateWithRoleArn", triggerAuthTemplateWithRoleArn)
+	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	addMessages(t, sqsClient, queueURL, sqsMessageCount)
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 180, 1),
+		"replica count should be %d after 3 minutes", maxReplicaCount)
+	testScaleIn(t, kc, sqsClient, queueURL)
+	KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	KubectlDeleteWithTemplate(t, data, "triggerAuthTemplateWithRoleArn", triggerAuthTemplateWithRoleArn)
+}
+
+func testScaleWithWorkloadArn(t *testing.T, kc *kubernetes.Clientset, data templateData, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- testing scaling out with workload arn role ---")
+	data.ScaledObjectName = "scale-using-workload-arn"
+	data.TriggerAuthenticationName = "scale-using-workload-arn"
+	KubectlApplyWithTemplate(t, data, "triggerAuthTemplateWithIdentityOwner", triggerAuthTemplateWithIdentityOwner)
+	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	addMessages(t, sqsClient, queueURL, sqsMessageCount)
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 180, 1),
+		"replica count should be %d after 3 minutes", maxReplicaCount)
+	testScaleIn(t, kc, sqsClient, queueURL)
+	KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	KubectlDeleteWithTemplate(t, data, "triggerAuthTemplateWithIdentityOwner", triggerAuthTemplateWithIdentityOwner)
+}
+
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- testing scaling in ---")
+	totalDeletedMessages := 0
+
+	for {
+		response, _ := sqsClient.ReceiveMessage(context.Background(), &sqs.ReceiveMessageInput{
+			QueueUrl:            queueURL,
+			MaxNumberOfMessages: int32(sqsMessageCount),
+		})
+		if response != nil {
+			for _, message := range response.Messages {
+				_, err := sqsClient.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{
+					QueueUrl:      queueURL,
+					ReceiptHandle: message.ReceiptHandle,
+				})
+				assert.NoErrorf(t, err, "cannot delete message - %s", err)
+				totalDeletedMessages++
+			}
+		}
+		if totalDeletedMessages == sqsMessageCount {
+			break
+		}
+
+		time.Sleep(time.Second)
+	}
+
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 180, 1),
+		"replica count should be 0 after 3 minutes")
+}
+
+func addMessages(t *testing.T, sqsClient *sqs.Client, queueURL *string, messages int) {
+	for i := 0; i < messages; i++ {
+		msg := fmt.Sprintf("Message - %d", i)
+		_, err := sqsClient.SendMessage(context.Background(), &sqs.SendMessageInput{
+			QueueUrl:     queueURL,
+			MessageBody:  aws.String(msg),
+			DelaySeconds: 10,
+		})
+		assert.NoErrorf(t, err, "cannot send message - %s", err)
+	}
+}
+
+func createSqsQueue(t *testing.T, queueName string, sqsClient *sqs.Client) *sqs.CreateQueueOutput {
+	queue, err := sqsClient.CreateQueue(context.Background(), &sqs.CreateQueueInput{
+		QueueName: &queueName,
+		Attributes: map[string]string{
+			"DelaySeconds":           "60",
+			"MessageRetentionPeriod": "86400",
+		}})
+	assert.NoErrorf(t, err, "failed to create queue - %s", err)
+	return queue
+}
+
+func cleanupQueue(t *testing.T, sqsClient *sqs.Client, queueURL *string) {
+	t.Log("--- cleaning up ---")
+	_, err := sqsClient.DeleteQueue(context.Background(), &sqs.DeleteQueueInput{
+		QueueUrl: queueURL,
+	})
+	assert.NoErrorf(t, err, "cannot delete queue - %s", err)
+}
+
+func 
createSqsClient() *sqs.Client { + configOptions := make([]func(*config.LoadOptions) error, 0) + configOptions = append(configOptions, config.WithRegion(awsRegion)) + cfg, _ := config.LoadDefaultConfig(context.TODO(), configOptions...) + cfg.Credentials = credentials.NewStaticCredentialsProvider(awsAccessKeyID, awsSecretAccessKey, "") + return sqs.NewFromConfig(cfg) +} + +func getTemplateData(sqsQueue string) (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + SecretName: secretName, + AwsRegion: awsRegion, + RoleArn: awsWorkload1RoleArn, + SqsQueue: sqsQueue, + }, []Template{ + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "serviceAccountTemplate", Config: serviceAccountTemplate}, + } +} diff --git a/tests/secret-providers/hashicorp_vault/hashicorp_vault_test.go b/tests/secret-providers/hashicorp_vault/hashicorp_vault_test.go index 864606be077..cd135e50d4a 100644 --- a/tests/secret-providers/hashicorp_vault/hashicorp_vault_test.go +++ b/tests/secret-providers/hashicorp_vault/hashicorp_vault_test.go @@ -45,7 +45,7 @@ var ( postgreSQLUsername, postgreSQLPassword, testNamespace, postgreSQLDatabase) prometheusServerName = fmt.Sprintf("%s-prom-server", testName) minReplicaCount = 0 - maxReplicaCount = 2 + maxReplicaCount = 1 ) type templateData struct { @@ -57,6 +57,7 @@ type templateData struct { VaultSecretPath string VaultPromDomain string SecretName string + HashiCorpAuthentication string HashiCorpToken string PostgreSQLStatefulSetName string PostgreSQLConnectionStringBase64 string @@ -153,7 +154,6 @@ spec: - type: postgresql metadata: targetQueryValue: "4" - activationTargetQueryValue: "5" query: "SELECT CEIL(COUNT(*) / 5) FROM task_instance WHERE state='running' OR state='queued'" authenticationRef: name: {{.TriggerAuthenticationName}} @@ -212,39 +212,6 @@ spec: type: ClusterIP ` - lowLevelRecordsJobTemplate = ` -apiVersion: batch/v1 -kind: Job -metadata: - labels: - app: postgresql-insert-low-level-job - name: postgresql-insert-low-level-job - namespace: {{.TestNamespace}} -spec: - template: - metadata: - labels: - app: postgresql-insert-low-level-job - spec: - containers: - - image: ghcr.io/kedacore/tests-postgresql - imagePullPolicy: Always - name: postgresql-processor-test - command: - - /app - - insert - env: - - name: TASK_INSTANCES_COUNT - value: "20" - - name: CONNECTION_STRING - valueFrom: - secretKeyRef: - name: {{.SecretName}} - key: postgresql_conn_str - restartPolicy: Never - backoffLimit: 4 -` - insertRecordsJobTemplate = ` apiVersion: batch/v1 kind: Job @@ -370,9 +337,12 @@ metadata: spec: hashiCorpVault: address: http://vault.{{.VaultNamespace}}:8200 - authentication: token + authentication: {{.HashiCorpAuthentication}} + role: keda + mount: kubernetes credential: token: {{.HashiCorpToken}} + serviceAccount: /var/run/secrets/kubernetes.io/serviceaccount/token secrets: - key: "ca_chain" parameter: "ca" @@ -409,40 +379,12 @@ spec: metadata: serverAddress: https://{{.PrometheusServerName}}.{{.TestNamespace}}.svc:80 authModes: "tls" - metricName: http_requests_total threshold: '20' - activationThreshold: '20' - query: sum(rate(http_requests_total{app="{{.MonitoredAppName}}"}[2m])) + query: http_requests_total{app="{{.MonitoredAppName}}"} authenticationRef: name: {{.TriggerAuthenticationName}} ` - generatePromLowLevelLoadJobTemplate = `apiVersion: batch/v1 -kind: Job -metadata: - name: generate-low-level-requests-job - namespace: {{.TestNamespace}} 
-spec:
-  template:
-    spec:
-      containers:
-      - image: quay.io/zroubalik/hey
-        name: test
-        command: ["/bin/sh"]
-        args: ["-c", "for i in $(seq 1 60);do echo $i;/hey -c 5 -n 30 http://{{.MonitoredAppName}}.{{.TestNamespace}}.svc;sleep 1;done"]
-        securityContext:
-          allowPrivilegeEscalation: false
-          runAsNonRoot: true
-          capabilities:
-            drop:
-              - ALL
-          seccompProfile:
-            type: RuntimeDefault
-      restartPolicy: Never
-  activeDeadlineSeconds: 100
-  backoffLimit: 2
-  `
-
 	generatePromLoadJobTemplate = `apiVersion: batch/v1
 kind: Job
 metadata:
@@ -452,10 +394,10 @@ spec:
   template:
     spec:
       containers:
-      - image: quay.io/zroubalik/hey
+      - image: ghcr.io/kedacore/tests-hey:latest
        name: test
        command: ["/bin/sh"]
-        args: ["-c", "for i in $(seq 1 60);do echo $i;/hey -c 5 -n 80 http://{{.MonitoredAppName}}.{{.TestNamespace}}.svc;sleep 1;done"]
+        args: ["-c", "for i in $(seq 1 60);do echo $i;/hey -c 15 -n 240 http://{{.MonitoredAppName}}.{{.TestNamespace}}.svc;sleep 1;done"]
        securityContext:
          allowPrivilegeEscalation: false
          runAsNonRoot: true
@@ -468,34 +410,52 @@ spec:
   activeDeadlineSeconds: 100
   backoffLimit: 2
 `
+	pkiPolicyTemplate = `path "pki*" {
+  capabilities = [ "create", "read", "update", "delete", "list", "sudo" ]
+}`
 )
 
-func TestPrometheusScalerWithMtls(t *testing.T) {
-	// Create kubernetes resources
-	kc := GetKubernetesClient(t)
-	hashiCorpToken, promPkiData := setupHashiCorpVault(t, kc, 2, true)
-	prometheus.Install(t, kc, prometheusServerName, testNamespace, promPkiData)
-
-	// Create kubernetes resources for testing
-	data, templates := getPrometheusTemplateData()
-	data.HashiCorpToken = RemoveANSI(hashiCorpToken)
-	data.VaultSecretPath = fmt.Sprintf("pki/issue/%s", testNamespace)
-	KubectlApplyMultipleWithTemplate(t, data, templates)
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, monitoredAppName, testNamespace, 1, 60, 3),
-		"replica count should be %d after 3 minutes", minReplicaCount)
-	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
-		"replica count should be %d after 3 minutes", minReplicaCount)
-
-	testPromActivation(t, kc, data)
-	testPromScaleOut(t, kc, data)
-	testScaleIn(t, kc)
-
-	// cleanup
-	KubectlDeleteMultipleWithTemplate(t, data, templates)
-	prometheus.Uninstall(t, prometheusServerName, testNamespace, nil)
+func TestPkiSecretsEngine(t *testing.T) {
+	tests := []struct {
+		authentication string
+	}{
+		{
+			authentication: "kubernetes",
+		},
+		{
+			authentication: "token",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.authentication, func(t *testing.T) {
+			// Create kubernetes resources
+			kc := GetKubernetesClient(t)
+			useKubernetesAuth := test.authentication == "kubernetes"
+			hashiCorpToken, promPkiData := setupHashiCorpVault(t, kc, 2, useKubernetesAuth, true)
+			prometheus.Install(t, kc, prometheusServerName, testNamespace, promPkiData)
+
+			// Create kubernetes resources for testing
+			data, templates := getPrometheusTemplateData()
+			data.HashiCorpAuthentication = test.authentication
+			data.HashiCorpToken = RemoveANSI(hashiCorpToken)
+			data.VaultSecretPath = fmt.Sprintf("pki/issue/%s", testNamespace)
+			KubectlApplyMultipleWithTemplate(t, data, templates)
+			assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, monitoredAppName, testNamespace, 1, 60, 3),
+				"replica count should be 1 after 3 minutes")
+			assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
+				"replica count should be %d after 3 minutes", minReplicaCount)
+
+			testPromScaleOut(t, kc, data)
+
+			// cleanup
+			KubectlDeleteMultipleWithTemplate(t, data, templates)
+			prometheus.Uninstall(t, prometheusServerName, testNamespace, nil)
+		})
+	}
 }
 
-func TestPostreSQLScaler(t *testing.T) {
+func TestSecretsEngine(t *testing.T) {
 	tests := []struct {
 		name               string
 		vaultEngineVersion uint
@@ -520,7 +480,7 @@ func TestPostreSQLScaler(t *testing.T) {
 		data, postgreSQLtemplates := getPostgreSQLTemplateData()
 		CreateKubernetesResources(t, kc, testNamespace, data, postgreSQLtemplates)
 
-		hashiCorpToken, _ := setupHashiCorpVault(t, kc, test.vaultEngineVersion, false)
+		hashiCorpToken, _ := setupHashiCorpVault(t, kc, test.vaultEngineVersion, false, false)
 
 		assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, postgreSQLStatefulSetName, testNamespace, 1, 60, 3),
 			"replica count should be %d after 3 minutes", 1)
@@ -540,9 +500,7 @@ func TestPostreSQLScaler(t *testing.T) {
 		assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
 			"replica count should be %d after 3 minutes", minReplicaCount)
 
-		testActivation(t, kc, data)
 		testScaleOut(t, kc, data)
-		testScaleIn(t, kc)
 
 		// cleanup
 		KubectlDeleteMultipleWithTemplate(t, data, templates)
@@ -590,7 +548,7 @@ func setupHashiCorpVaultPki(t *testing.T, podName string, nameSpace string) *pro
 	return &pkiData
 }
 
-func setupHashiCorpVault(t *testing.T, kc *kubernetes.Clientset, kvVersion uint, pki bool) (string, *prometheus.VaultPkiData) {
+func setupHashiCorpVault(t *testing.T, kc *kubernetes.Clientset, kvVersion uint, useKubernetesAuth, pki bool) (string, *prometheus.VaultPkiData) {
 	CreateNamespace(t, kc, vaultNamespace)
 
 	_, err := ExecuteCommand("helm repo add hashicorp https://helm.releases.hashicorp.com")
@@ -611,9 +569,27 @@ func setupHashiCorpVault(t *testing.T, kc *kubernetes.Clientset, kvVersion uint,
 	podName := "vault-0"
 
+	// Enable Kubernetes auth
+	if useKubernetesAuth {
+		if pki {
+			remoteFile := "/tmp/policy.hcl"
+			KubectlCopyToPod(t, pkiPolicyTemplate, remoteFile, podName, vaultNamespace)
+			assert.NoErrorf(t, err, "cannot create policy file in hashicorp vault - %s", err)
+			_, _, err = ExecCommandOnSpecificPod(t, podName, vaultNamespace, fmt.Sprintf("vault policy write pkiPolicy %s", remoteFile))
+			assert.NoErrorf(t, err, "cannot create policy in hashicorp vault - %s", err)
+		}
+		_, _, err = ExecCommandOnSpecificPod(t, podName, vaultNamespace, "vault auth enable kubernetes")
+		assert.NoErrorf(t, err, "cannot enable kubernetes in hashicorp vault - %s", err)
+		_, _, err = ExecCommandOnSpecificPod(t, podName, vaultNamespace, "vault write auth/kubernetes/config kubernetes_host=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT")
+		assert.NoErrorf(t, err, "cannot set kubernetes host in hashicorp vault - %s", err)
+		_, _, err = ExecCommandOnSpecificPod(t, podName, vaultNamespace, "vault write auth/kubernetes/role/keda bound_service_account_names=keda-operator bound_service_account_namespaces=keda policies=pkiPolicy ttl=1h")
+		assert.NoErrorf(t, err, "cannot create keda role in hashicorp vault - %s", err)
+	}
 	// Create kv secret
-	_, _, err = ExecCommandOnSpecificPod(t, podName, vaultNamespace, fmt.Sprintf("vault kv put secret/keda connectionString=%s", postgreSQLConnectionString))
-	assert.NoErrorf(t, err, "cannot put connection string in hashicorp vault - %s", err)
+	if !pki {
+		_, _, err = ExecCommandOnSpecificPod(t, podName, vaultNamespace, fmt.Sprintf("vault kv put secret/keda connectionString=%s", postgreSQLConnectionString))
+		assert.NoErrorf(t, err, "cannot put 
connection string in hashicorp vault - %s", err) + } // Create PKI Backend var pkiData *prometheus.VaultPkiData @@ -621,10 +597,13 @@ func setupHashiCorpVault(t *testing.T, kc *kubernetes.Clientset, kvVersion uint, pkiData = setupHashiCorpVaultPki(t, podName, vaultNamespace) } - out, _, err := ExecCommandOnSpecificPod(t, podName, vaultNamespace, "vault token create -field token") - assert.NoErrorf(t, err, "cannot create hashicorp vault token - %s", err) - - return out, pkiData + // Generate Hashicorp Token + token := "INVALID" + if !useKubernetesAuth { + token, _, err = ExecCommandOnSpecificPod(t, podName, vaultNamespace, "vault token create -field token") + assert.NoErrorf(t, err, "cannot create hashicorp vault token - %s", err) + } + return token, pkiData } func cleanupHashiCorpVault(t *testing.T) { @@ -637,13 +616,6 @@ func cleanupHashiCorpVault(t *testing.T) { DeleteNamespace(t, vaultNamespace) } -func testPromActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { - t.Log("--- testing activation ---") - KubectlReplaceWithTemplate(t, data, "generateLowLevelLoadJobTemplate", generatePromLowLevelLoadJobTemplate) - - AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) -} - func testPromScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { t.Log("--- testing scale out ---") KubectlReplaceWithTemplate(t, data, "generateLoadJobTemplate", generatePromLoadJobTemplate) @@ -652,13 +624,6 @@ func testPromScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) "replica count should be %d after 3 minutes", maxReplicaCount) } -func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { - t.Log("--- testing activation ---") - KubectlReplaceWithTemplate(t, data, "lowLevelRecordsJobTemplate", lowLevelRecordsJobTemplate) - - AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) -} - func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { t.Log("--- testing scale out ---") KubectlReplaceWithTemplate(t, data, "insertRecordsJobTemplate", insertRecordsJobTemplate) @@ -667,13 +632,6 @@ func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { "replica count should be %d after 5 minutes", maxReplicaCount) } -func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { - t.Log("--- testing scale in ---") - - assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 5), - "replica count should be %d after 5 minutes", minReplicaCount) -} - var data = templateData{ TestNamespace: testNamespace, PostgreSQLStatefulSetName: postgreSQLStatefulSetName, diff --git a/tests/sequential/opentelemetry_metrics/opentelemetry_metrics_test.go b/tests/sequential/opentelemetry_metrics/opentelemetry_metrics_test.go index 32e11becc89..1373c3c9471 100644 --- a/tests/sequential/opentelemetry_metrics/opentelemetry_metrics_test.go +++ b/tests/sequential/opentelemetry_metrics/opentelemetry_metrics_test.go @@ -24,9 +24,14 @@ import ( ) const ( - testName = "opentelemetry-metrics-test" - labelScaledObject = "scaledObject" - labelType = "type" + testName = "opentelemetry-metrics-test" + labelScaledObject = "scaledObject" + labelType = "type" + labelCloudEventSource = "cloudEventSource" + eventsink = "eventsink" + eventsinkValue = "opentelemetry-metrics-test-ce" + eventsinkType = "eventsinktype" + eventsinkTypeValue = "http" ) var ( @@ -38,6 +43,11 @@ var ( wrongScalerName = 
fmt.Sprintf("%s-wrong-scaler", testName) cronScaledJobName = fmt.Sprintf("%s-cron-sj", testName) clientName = fmt.Sprintf("%s-client", testName) + cloudEventSourceName = fmt.Sprintf("%s-ce", testName) + wrongCloudEventSourceName = fmt.Sprintf("%s-ce-w", testName) + cloudEventHTTPReceiverName = fmt.Sprintf("%s-cloudevent-http-receiver", testName) + cloudEventHTTPServiceName = fmt.Sprintf("%s-cloudevent-http-service", testName) + cloudEventHTTPServiceURL = fmt.Sprintf("http://%s.%s.svc.cluster.local:8899", cloudEventHTTPServiceName, testNamespace) kedaOperatorCollectorPrometheusExportURL = "http://opentelemetry-collector.open-telemetry-system.svc.cluster.local:8889/metrics" namespaceString = "namespace" kedaNamespace = "keda" @@ -46,15 +56,20 @@ var ( ) type templateData struct { - TestName string - TestNamespace string - DeploymentName string - ScaledObjectName string - WrongScaledObjectName string - WrongScalerName string - CronScaledJobName string - MonitoredDeploymentName string - ClientName string + TestName string + TestNamespace string + DeploymentName string + ScaledObjectName string + WrongScaledObjectName string + WrongScalerName string + CronScaledJobName string + MonitoredDeploymentName string + ClientName string + CloudEventSourceName string + WrongCloudEventSourceName string + CloudEventHTTPReceiverName string + CloudEventHTTPServiceName string + CloudEventHTTPServiceURL string } const ( @@ -268,6 +283,77 @@ spec: podSelector: 'app={{.MonitoredDeploymentName}}' value: '1' ` + + cloudEventSourceTemplate = ` +apiVersion: eventing.keda.sh/v1alpha1 +kind: CloudEventSource +metadata: + name: {{.CloudEventSourceName}} + namespace: {{.TestNamespace}} +spec: + clusterName: cluster-sample + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} +` + wrongCloudEventSourceTemplate = ` +apiVersion: eventing.keda.sh/v1alpha1 +kind: CloudEventSource +metadata: + name: {{.WrongCloudEventSourceName}} + namespace: {{.TestNamespace}} +spec: + clusterName: cluster-sample + destination: + http: + uri: http://fo.wo +` + + cloudEventHTTPServiceTemplate = ` + apiVersion: v1 + kind: Service + metadata: + name: {{.CloudEventHTTPServiceName}} + namespace: {{.TestNamespace}} + spec: + type: ClusterIP + ports: + - protocol: TCP + port: 8899 + targetPort: 8899 + selector: + app: {{.CloudEventHTTPReceiverName}} + ` + + cloudEventHTTPReceiverTemplate = ` + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + deploy: {{.CloudEventHTTPReceiverName}} + name: {{.CloudEventHTTPReceiverName}} + namespace: {{.TestNamespace}} + spec: + selector: + matchLabels: + app: {{.CloudEventHTTPReceiverName}} + replicas: 1 + template: + metadata: + labels: + app: {{.CloudEventHTTPReceiverName}} + spec: + containers: + - name: httpreceiver + image: ghcr.io/kedacore/tests-cloudevents-http:latest + ports: + - containerPort: 8899 + resources: + requests: + cpu: "200m" + limits: + cpu: "500m" + ` ) func TestOpenTelemetryMetrics(t *testing.T) { @@ -297,6 +383,8 @@ func TestOpenTelemetryMetrics(t *testing.T) { testOperatorMetrics(t, kc, data) testScalableObjectMetrics(t) testScaledObjectPausedMetric(t, data) + testCloudEventEmitted(t, data) + testCloudEventEmittedError(t, data) // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) @@ -304,21 +392,28 @@ func TestOpenTelemetryMetrics(t *testing.T) { func getTemplateData() (templateData, []Template) { return templateData{ - TestName: testName, - TestNamespace: testNamespace, - DeploymentName: deploymentName, - ScaledObjectName: scaledObjectName, - 
WrongScaledObjectName: wrongScaledObjectName, - WrongScalerName: wrongScalerName, - MonitoredDeploymentName: monitoredDeploymentName, - ClientName: clientName, - CronScaledJobName: cronScaledJobName, + TestName: testName, + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + WrongScaledObjectName: wrongScaledObjectName, + WrongScalerName: wrongScalerName, + MonitoredDeploymentName: monitoredDeploymentName, + ClientName: clientName, + CronScaledJobName: cronScaledJobName, + CloudEventSourceName: cloudEventSourceName, + WrongCloudEventSourceName: wrongCloudEventSourceName, + CloudEventHTTPReceiverName: cloudEventHTTPReceiverName, + CloudEventHTTPServiceName: cloudEventHTTPServiceName, + CloudEventHTTPServiceURL: cloudEventHTTPServiceURL, }, []Template{ {Name: "deploymentTemplate", Config: deploymentTemplate}, {Name: "monitoredDeploymentTemplate", Config: monitoredDeploymentTemplate}, {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, {Name: "clientTemplate", Config: clientTemplate}, {Name: "authenticatioNTemplate", Config: authenticationTemplate}, + {Name: "cloudEventHTTPReceiverTemplate", Config: cloudEventHTTPReceiverTemplate}, + {Name: "cloudEventHTTPServiceTemplate", Config: cloudEventHTTPServiceTemplate}, } } @@ -755,3 +850,68 @@ func assertScaledObjectPausedMetric(t *testing.T, families map[string]*prommodel } assert.Equal(t, float64(expectedMetricValue), metricValue) } + +func testCloudEventEmitted(t *testing.T, data templateData) { + t.Log("--- testing cloudevent emitted ---") + + KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + + time.Sleep(10 * time.Second) + family := fetchAndParsePrometheusMetrics(t, fmt.Sprintf("curl --insecure %s", kedaOperatorCollectorPrometheusExportURL)) + + if val, ok := family["keda_cloudeventsource_events_emitted_count_total"]; ok { + var found bool + metrics := val.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + if len(labels) >= 5 && + *labels[0].Value == "opentelemetry-metrics-test-ce" && + *labels[1].Value == "http" && + *labels[3].Value == "opentelemetry-metrics-test-ns" && + *labels[4].Value == "emitted" { + assert.GreaterOrEqual(t, *metric.Counter.Value, float64(1)) + found = true + } + } + assert.Equal(t, true, found) + } else { + t.Errorf("metric not available") + } +} + +func testCloudEventEmittedError(t *testing.T, data templateData) { + t.Log("--- testing cloudevent emitted error ---") + + KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + KubectlDeleteWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) + KubectlApplyWithTemplate(t, data, "wrongCloudEventSourceTemplate", wrongCloudEventSourceTemplate) + time.Sleep(1 * time.Second) + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + + time.Sleep(10 * time.Second) + family := fetchAndParsePrometheusMetrics(t, fmt.Sprintf("curl --insecure %s", kedaOperatorCollectorPrometheusExportURL)) + + if val, ok := family["keda_cloudeventsource_events_emitted_count_total"]; ok { + var found bool + metrics := val.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + if len(labels) >= 5 && + *labels[0].Value == "opentelemetry-metrics-test-ce-w" && + *labels[1].Value == "http" && + *labels[3].Value == 
"opentelemetry-metrics-test-ns" && + *labels[4].Value == "failed" { + assert.GreaterOrEqual(t, *metric.Counter.Value, float64(5)) + found = true + } + } + assert.Equal(t, true, found) + } else { + t.Errorf("metric not available") + } + + KubectlDeleteWithTemplate(t, data, "wrongCloudEventSourceTemplate", wrongCloudEventSourceTemplate) + KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) +} diff --git a/tests/sequential/prometheus_metrics/prometheus_metrics_test.go b/tests/sequential/prometheus_metrics/prometheus_metrics_test.go index 9f3ef87e70f..96ffc87a540 100644 --- a/tests/sequential/prometheus_metrics/prometheus_metrics_test.go +++ b/tests/sequential/prometheus_metrics/prometheus_metrics_test.go @@ -24,9 +24,14 @@ import ( ) const ( - testName = "prometheus-metrics-test" - labelScaledObject = "scaledObject" - labelType = "type" + testName = "prometheus-metrics-test" + labelScaledObject = "scaledObject" + labelType = "type" + labelCloudEventSource = "cloudeventsource" + eventsink = "eventsink" + eventsinkValue = "prometheus-metrics-test-ce" + eventsinkType = "eventsinktype" + eventsinkTypeValue = "http" ) var ( @@ -38,6 +43,11 @@ var ( wrongScalerName = fmt.Sprintf("%s-wrong-scaler", testName) cronScaledJobName = fmt.Sprintf("%s-cron-sj", testName) clientName = fmt.Sprintf("%s-client", testName) + cloudEventSourceName = fmt.Sprintf("%s-ce", testName) + wrongCloudEventSourceName = fmt.Sprintf("%s-ce-w", testName) + cloudEventHTTPReceiverName = fmt.Sprintf("%s-cloudevent-http-receiver", testName) + cloudEventHTTPServiceName = fmt.Sprintf("%s-cloudevent-http-service", testName) + cloudEventHTTPServiceURL = fmt.Sprintf("http://%s.%s.svc.cluster.local:8899", cloudEventHTTPServiceName, testNamespace) kedaOperatorPrometheusURL = "http://keda-operator.keda.svc.cluster.local:8080/metrics" kedaMetricsServerPrometheusURL = "http://keda-metrics-apiserver.keda.svc.cluster.local:8080/metrics" kedaWebhookPrometheusURL = "http://keda-admission-webhooks.keda.svc.cluster.local:8080/metrics" @@ -45,15 +55,20 @@ var ( ) type templateData struct { - TestName string - TestNamespace string - DeploymentName string - ScaledObjectName string - WrongScaledObjectName string - WrongScalerName string - CronScaledJobName string - MonitoredDeploymentName string - ClientName string + TestName string + TestNamespace string + DeploymentName string + ScaledObjectName string + WrongScaledObjectName string + WrongScalerName string + CronScaledJobName string + MonitoredDeploymentName string + ClientName string + CloudEventSourceName string + WrongCloudEventSourceName string + CloudEventHTTPReceiverName string + CloudEventHTTPServiceName string + CloudEventHTTPServiceURL string } const ( @@ -267,6 +282,78 @@ spec: podSelector: 'app={{.MonitoredDeploymentName}}' value: '1' ` + + cloudEventSourceTemplate = ` +apiVersion: eventing.keda.sh/v1alpha1 +kind: CloudEventSource +metadata: + name: {{.CloudEventSourceName}} + namespace: {{.TestNamespace}} +spec: + clusterName: cluster-sample + destination: + http: + uri: {{.CloudEventHTTPServiceURL}} +` + + wrongCloudEventSourceTemplate = ` +apiVersion: eventing.keda.sh/v1alpha1 +kind: CloudEventSource +metadata: + name: {{.WrongCloudEventSourceName}} + namespace: {{.TestNamespace}} +spec: + clusterName: cluster-sample + destination: + http: + uri: http://fo.wo +` + + cloudEventHTTPServiceTemplate = ` +apiVersion: v1 +kind: Service +metadata: + name: {{.CloudEventHTTPServiceName}} + namespace: {{.TestNamespace}} +spec: + type: ClusterIP + ports: + - 
protocol: TCP + port: 8899 + targetPort: 8899 + selector: + app: {{.CloudEventHTTPReceiverName}} +` + + cloudEventHTTPReceiverTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + deploy: {{.CloudEventHTTPReceiverName}} + name: {{.CloudEventHTTPReceiverName}} + namespace: {{.TestNamespace}} +spec: + selector: + matchLabels: + app: {{.CloudEventHTTPReceiverName}} + replicas: 1 + template: + metadata: + labels: + app: {{.CloudEventHTTPReceiverName}} + spec: + containers: + - name: httpreceiver + image: ghcr.io/kedacore/tests-cloudevents-http:latest + ports: + - containerPort: 8899 + resources: + requests: + cpu: "200m" + limits: + cpu: "500m" +` ) func TestPrometheusMetrics(t *testing.T) { @@ -294,27 +381,36 @@ func TestPrometheusMetrics(t *testing.T) { testWebhookMetrics(t, data) testScalableObjectMetrics(t) testScaledObjectPausedMetric(t, data) + testCloudEventEmitted(t, data) + testCloudEventEmittedError(t, data) // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) } func getTemplateData() (templateData, []Template) { return templateData{ - TestName: testName, - TestNamespace: testNamespace, - DeploymentName: deploymentName, - ScaledObjectName: scaledObjectName, - WrongScaledObjectName: wrongScaledObjectName, - WrongScalerName: wrongScalerName, - MonitoredDeploymentName: monitoredDeploymentName, - ClientName: clientName, - CronScaledJobName: cronScaledJobName, + TestName: testName, + TestNamespace: testNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + WrongScaledObjectName: wrongScaledObjectName, + WrongScalerName: wrongScalerName, + MonitoredDeploymentName: monitoredDeploymentName, + ClientName: clientName, + CronScaledJobName: cronScaledJobName, + CloudEventSourceName: cloudEventSourceName, + WrongCloudEventSourceName: wrongCloudEventSourceName, + CloudEventHTTPReceiverName: cloudEventHTTPReceiverName, + CloudEventHTTPServiceName: cloudEventHTTPServiceName, + CloudEventHTTPServiceURL: cloudEventHTTPServiceURL, }, []Template{ {Name: "deploymentTemplate", Config: deploymentTemplate}, {Name: "monitoredDeploymentTemplate", Config: monitoredDeploymentTemplate}, {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, {Name: "clientTemplate", Config: clientTemplate}, {Name: "authenticatioNTemplate", Config: authenticationTemplate}, + {Name: "cloudEventHTTPReceiverTemplate", Config: cloudEventHTTPReceiverTemplate}, + {Name: "cloudEventHTTPServiceTemplate", Config: cloudEventHTTPServiceTemplate}, } } @@ -879,3 +975,67 @@ func checkMetricServerValues(t *testing.T, families map[string]*prommodel.Metric } assert.GreaterOrEqual(t, metricValue, 1.0, "apiserver_request_total has to be greater than 0") } + +func testCloudEventEmitted(t *testing.T, data templateData) { + t.Log("--- testing cloudevent emitted ---") + + KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + + family := fetchAndParsePrometheusMetrics(t, fmt.Sprintf("curl --insecure %s", kedaOperatorPrometheusURL)) + + if val, ok := family["keda_cloudeventsource_events_emitted_total"]; ok { + var found bool + metrics := val.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + if len(labels) >= 4 && + *labels[0].Value == "prometheus-metrics-test-ce" && + *labels[1].Value == "http" && + *labels[2].Value == "prometheus-metrics-test-ns" && + 
*labels[3].Value == "emitted" { + assert.GreaterOrEqual(t, *metric.Counter.Value, float64(1)) + found = true + } + } + assert.Equal(t, true, found) + } else { + t.Errorf("metric not available") + } +} + +func testCloudEventEmittedError(t *testing.T, data templateData) { + t.Log("--- testing cloudevent emitted error ---") + + KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + KubectlDeleteWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) + KubectlApplyWithTemplate(t, data, "wrongCloudEventSourceTemplate", wrongCloudEventSourceTemplate) + time.Sleep(1 * time.Second) + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + time.Sleep(5 * time.Second) + + family := fetchAndParsePrometheusMetrics(t, fmt.Sprintf("curl --insecure %s", kedaOperatorPrometheusURL)) + + if val, ok := family["keda_cloudeventsource_events_emitted_total"]; ok { + var found bool + metrics := val.GetMetric() + for _, metric := range metrics { + labels := metric.GetLabel() + if len(labels) >= 4 && + *labels[0].Value == "prometheus-metrics-test-ce-w" && + *labels[1].Value == "http" && + *labels[2].Value == "prometheus-metrics-test-ns" && + *labels[3].Value == "failed" { + assert.GreaterOrEqual(t, *metric.Counter.Value, float64(5)) + found = true + } + } + assert.Equal(t, true, found) + } else { + t.Errorf("metric not available") + } + + KubectlDeleteWithTemplate(t, data, "wrongCloudEventSourceTemplate", wrongCloudEventSourceTemplate) + KubectlApplyWithTemplate(t, data, "cloudEventSourceTemplate", cloudEventSourceTemplate) +}