diff --git a/instrumentor/controllers/deleteinstrumentedapplication/common.go b/instrumentor/controllers/deleteinstrumentedapplication/common.go
index 2825374b5..d88a4ec42 100644
--- a/instrumentor/controllers/deleteinstrumentedapplication/common.go
+++ b/instrumentor/controllers/deleteinstrumentedapplication/common.go
@@ -27,7 +27,7 @@ func reconcileWorkloadObject(ctx context.Context, kubeClient client.Client, work
 		if err != nil {
 			return err
 		}
-		if len(sourceList.Items) == 0 {
+		if len(sourceList.Items) > 0 {
 			return nil
 		}
 	}
diff --git a/instrumentor/controllers/deleteinstrumentedapplication/namespace_controller.go b/instrumentor/controllers/deleteinstrumentedapplication/namespace_controller.go
index c34ef8b50..e24779184 100644
--- a/instrumentor/controllers/deleteinstrumentedapplication/namespace_controller.go
+++ b/instrumentor/controllers/deleteinstrumentedapplication/namespace_controller.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 
+	"github.com/odigos-io/odigos/api/odigos/v1alpha1"
 	"github.com/odigos-io/odigos/common/consts"
 	"github.com/odigos-io/odigos/k8sutils/pkg/workload"
 
@@ -150,11 +151,14 @@ func syncGenericWorkloadListToNs(ctx context.Context, c client.Client, kind work
 		}
 	}
 
-	if !isInheritingInstrumentationFromNs(freshWorkloadCopy) {
+	inheriting, err := isInheritingInstrumentationFromNs(ctx, c, freshWorkloadCopy)
+	if err != nil {
+		return err
+	}
+	if !inheriting {
 		return nil
 	}
 
-	var err error
 	err = errors.Join(err, deleteWorkloadInstrumentedApplication(ctx, c, freshWorkloadCopy))
 	err = errors.Join(err, removeReportedNameAnnotation(ctx, c, freshWorkloadCopy))
 	return err
@@ -164,11 +168,23 @@ func syncGenericWorkloadListToNs(ctx context.Context, c client.Client, kind work
 // when reconciling the namespace, the use case is to delete instrumentation for workloads that were only
 // instrumented due to the label on the namespace. These are workloads with the label missing.
 // (they inherit the instrumentation from the namespace this way)
-func isInheritingInstrumentationFromNs(obj client.Object) bool {
+func isInheritingInstrumentationFromNs(ctx context.Context, c client.Client, obj client.Object) (bool, error) {
+	sourceList, err := v1alpha1.GetSourceListForWorkload(ctx, c, obj)
+	if err != nil {
+		return false, err
+	}
+	for _, source := range sourceList.Items {
+		// if a Source exists for this workload specifically, then it is not inheriting instrumentation from the Namespace
+		if source.Spec.Workload.Name == obj.GetName() &&
+			source.Spec.Workload.Kind == workload.WorkloadKind(obj.GetObjectKind().GroupVersionKind().Kind) {
+			return false, nil
+		}
+	}
+
 	labels := obj.GetLabels()
 	if labels == nil {
-		return true
+		return true, nil
 	}
 	_, exists := labels[consts.OdigosInstrumentationLabel]
-	return !exists
+	return !exists, nil
 }
diff --git a/instrumentor/controllers/startlangdetection/source_controller.go b/instrumentor/controllers/startlangdetection/source_controller.go
index af4446369..373cd26ba 100644
--- a/instrumentor/controllers/startlangdetection/source_controller.go
+++ b/instrumentor/controllers/startlangdetection/source_controller.go
@@ -47,6 +47,22 @@ func (r *SourceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 		return k8sutils.K8SUpdateErrorHandler(err)
 	}
 
+	// Pre-process the existing Sources for specific workloads, so we don't have to issue a Source lookup API call for each workload below.
+	// The map is used to check whether a workload already has an explicit Source, so we don't overwrite its InstrumentationConfig.
+	sourceList := v1alpha1.SourceList{}
+	err := r.Client.List(ctx, &sourceList, client.InNamespace(source.Spec.Workload.Name))
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	namespaceKindSources := make(map[workload.WorkloadKind]map[string]struct{})
+	for _, source := range sourceList.Items {
+		if _, exists := namespaceKindSources[source.Spec.Workload.Kind]; !exists {
+			namespaceKindSources[source.Spec.Workload.Kind] = make(map[string]struct{})
+		}
+		// ex: map["Deployment"]["my-app"] = ...
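+		// (the loops below check membership in this set with a map lookup, instead of issuing a Source lookup per workload)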
+		namespaceKindSources[source.Spec.Workload.Kind][source.Spec.Workload.Name] = struct{}{}
+	}
+
 	if source.Spec.Workload.Kind == "Namespace" {
 		var deps appsv1.DeploymentList
 		err = r.Client.List(ctx, &deps, client.InNamespace(source.Spec.Workload.Name))
@@ -56,10 +72,12 @@ func (r *SourceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 		}
 
 		for _, dep := range deps.Items {
-			request := ctrl.Request{NamespacedName: client.ObjectKey{Name: dep.Name, Namespace: dep.Namespace}}
-			_, err = reconcileWorkload(ctx, r.Client, workload.WorkloadKindDeployment, request, r.Scheme)
-			if err != nil {
-				logger.Error(err, "error requesting runtime details from odiglets", "name", dep.Name, "namespace", dep.Namespace)
+			if _, exists := namespaceKindSources[workload.WorkloadKindDeployment][dep.Name]; !exists {
+				request := ctrl.Request{NamespacedName: client.ObjectKey{Name: dep.Name, Namespace: dep.Namespace}}
+				_, err = reconcileWorkload(ctx, r.Client, workload.WorkloadKindDeployment, request, r.Scheme)
+				if err != nil {
+					logger.Error(err, "error requesting runtime details from odiglets", "name", dep.Name, "namespace", dep.Namespace)
+				}
 			}
 		}
 
@@ -71,10 +89,12 @@ func (r *SourceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 		}
 
 		for _, st := range sts.Items {
-			request := ctrl.Request{NamespacedName: client.ObjectKey{Name: st.Name, Namespace: st.Namespace}}
-			_, err = reconcileWorkload(ctx, r.Client, workload.WorkloadKindStatefulSet, request, r.Scheme)
-			if err != nil {
-				logger.Error(err, "error requesting runtime details from odiglets", "name", st.Name, "namespace", st.Namespace)
+			if _, exists := namespaceKindSources[workload.WorkloadKindStatefulSet][st.Name]; !exists {
+				request := ctrl.Request{NamespacedName: client.ObjectKey{Name: st.Name, Namespace: st.Namespace}}
+				_, err = reconcileWorkload(ctx, r.Client, workload.WorkloadKindStatefulSet, request, r.Scheme)
+				if err != nil {
+					logger.Error(err, "error requesting runtime details from odiglets", "name", st.Name, "namespace", st.Namespace)
+				}
 			}
 		}
 
@@ -86,10 +106,12 @@ func (r *SourceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 		}
 
 		for _, ds := range dss.Items {
-			request := ctrl.Request{NamespacedName: client.ObjectKey{Name: ds.Name, Namespace: ds.Namespace}}
-			_, err = reconcileWorkload(ctx, r.Client, workload.WorkloadKindDaemonSet, request, r.Scheme)
-			if err != nil {
-				logger.Error(err, "error requesting runtime details from odiglets", "name", ds.Name, "namespace", ds.Namespace)
+			if _, exists := namespaceKindSources[workload.WorkloadKindDaemonSet][ds.Name]; !exists {
+				request := ctrl.Request{NamespacedName: client.ObjectKey{Name: ds.Name, Namespace: ds.Namespace}}
+				_, err = reconcileWorkload(ctx, r.Client, workload.WorkloadKindDaemonSet, request, r.Scheme)
+				if err != nil {
+					logger.Error(err, "error requesting runtime details from odiglets", "name", ds.Name, "namespace", ds.Namespace)
+				}
 			}
 		}
 	} else {
diff --git a/instrumentor/controllers/startlangdetection/workload_controllers.go b/instrumentor/controllers/startlangdetection/workload_controllers.go
index 6abc5bcd1..62e3b7a51 100644
--- a/instrumentor/controllers/startlangdetection/workload_controllers.go
+++ b/instrumentor/controllers/startlangdetection/workload_controllers.go
@@ -10,7 +10,6 @@ import (
 
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
-	"github.com/odigos-io/odigos/api/odigos/v1alpha1"
 	odigosv1 "github.com/odigos-io/odigos/api/odigos/v1alpha1"
 	"github.com/odigos-io/odigos/common/consts"
 	"github.com/odigos-io/odigos/k8sutils/pkg/workload"
@@ -62,7 +61,7 @@ func reconcileWorkload(ctx context.Context, k8sClient client.Client, objKind wor
 
 	if !instrumented {
 		// Check if a Source object exists for this workload
-		sourceList, err := v1alpha1.GetSourceListForWorkload(ctx, k8sClient, obj)
+		sourceList, err := odigosv1.GetSourceListForWorkload(ctx, k8sClient, obj)
 		if err != nil {
 			return ctrl.Result{}, err
 		}
diff --git a/tests/e2e/source/05-assert-runtime-detected.yaml b/tests/e2e/source/05-assert-runtime-detected.yaml
new file mode 100644
index 000000000..a2771ca7b
--- /dev/null
+++ b/tests/e2e/source/05-assert-runtime-detected.yaml
@@ -0,0 +1,32 @@
+apiVersion: odigos.io/v1alpha1
+kind: InstrumentedApplication
+metadata:
+  name: deployment-frontend
+  namespace: default
+  ownerReferences:
+    - apiVersion: apps/v1
+      blockOwnerDeletion: true
+      controller: true
+      kind: Deployment
+      name: frontend
+spec:
+  runtimeDetails:
+    - containerName: frontend
+      language: java
+---
+apiVersion: odigos.io/v1alpha1
+kind: InstrumentationConfig
+metadata:
+  name: deployment-frontend
+  namespace: default
+  ownerReferences:
+    - apiVersion: apps/v1
+      blockOwnerDeletion: true
+      controller: true
+      kind: Deployment
+      name: frontend
+status:
+  runtimeDetailsByContainer:
+    - containerName: frontend
+      language: java
+      runtimeVersion: 17.0.11+9
diff --git a/tests/e2e/source/05-source.yaml b/tests/e2e/source/05-source.yaml
new file mode 100644
index 000000000..d3382faae
--- /dev/null
+++ b/tests/e2e/source/05-source.yaml
@@ -0,0 +1,12 @@
+apiVersion: odigos.io/v1alpha1
+kind: Source
+metadata:
+  name: frontend
+  namespace: default
+  labels:
+    odigos.io/e2e: source-single
+spec:
+  workload:
+    name: frontend
+    namespace: default
+    kind: Deployment
diff --git a/tests/e2e/source/05-workloads.yaml b/tests/e2e/source/05-workloads.yaml
new file mode 100644
index 000000000..d86795233
--- /dev/null
+++ b/tests/e2e/source/05-workloads.yaml
@@ -0,0 +1,44 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "5"
+  generation: 5
+  name: coupon
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "6" # frontend should be the only deployment updated at this point
+  generation: 6
+  name: frontend
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "5"
+  generation: 5
+  name: inventory
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "5"
+  generation: 5
+  name: membership
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "5"
+  generation: 5
+  name: pricing
+  namespace: default
\ No newline at end of file
diff --git a/tests/e2e/source/06-workloads.yaml b/tests/e2e/source/06-workloads.yaml
new file mode 100644
index 000000000..7be83b537
--- /dev/null
+++ b/tests/e2e/source/06-workloads.yaml
@@ -0,0 +1,44 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "6"
+  generation: 6
+  name: coupon
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "6"
+  generation: 6
+  name: frontend
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "6"
+  generation: 6
+  name: inventory
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "6"
+  generation: 6
+  name: membership
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "6"
+  generation: 6
+  name: pricing
+  namespace: default
\ No newline at end of file
diff --git a/tests/e2e/source/07-workloads.yaml b/tests/e2e/source/07-workloads.yaml
new file mode 100644
index 000000000..dde612ffa
--- /dev/null
+++ b/tests/e2e/source/07-workloads.yaml
@@ -0,0 +1,44 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "7"
+  generation: 7
+  name: coupon
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "6"
+  generation: 6
+  name: frontend
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "7"
+  generation: 7
+  name: inventory
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "7"
+  generation: 7
+  name: membership
+  namespace: default
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    deployment.kubernetes.io/revision: "7"
+  generation: 7
+  name: pricing
+  namespace: default
\ No newline at end of file
diff --git a/tests/e2e/source/chainsaw-test.yaml b/tests/e2e/source/chainsaw-test.yaml
index d192438e9..83cdbc661 100644
--- a/tests/e2e/source/chainsaw-test.yaml
+++ b/tests/e2e/source/chainsaw-test.yaml
@@ -100,11 +100,22 @@ spec:
               kubectl rollout status deployment -l app=inventory
               kubectl rollout status deployment -l app=pricing
               kubectl rollout status deployment -l app=membership
+    - name: Simple-demo pods ready for traffic
+      try:
+        - script:
+            timeout: 70s
+            content: |
               kubectl wait --for=condition=ready pod -l app=frontend --timeout=60s
               kubectl wait --for=condition=ready pod -l app=coupon --timeout=60s
               kubectl wait --for=condition=ready pod -l app=inventory --timeout=60s
               kubectl wait --for=condition=ready pod -l app=pricing --timeout=60s
               kubectl wait --for=condition=ready pod -l app=membership --timeout=60s
+    - name: Wait for apps auto-instrumentation
+      try:
+        - script:
+            timeout: 60s
+            content: |
+              sleep 30
     - name: Generate Traffic
       try:
         - script:
@@ -171,7 +182,7 @@
            content: |
              kubectl delete sources --all
              while true; do
-              ic_count=$(kubectl get instrumentationconfigs | wc -l)
+              ic_count=$(kubectl get instrumentationconfigs --output name | wc -l)
                if [ $ic_count -eq "0" ]; then
                  break
                fi
@@ -190,6 +201,11 @@
               kubectl rollout status deployment -l app=inventory
               kubectl rollout status deployment -l app=pricing
               kubectl rollout status deployment -l app=membership
+    - name: Simple-demo pods ready for traffic
+      try:
+        - script:
+            timeout: 70s
+            content: |
               kubectl wait --for=condition=ready pod -l app=frontend --timeout=60s
               kubectl wait --for=condition=ready pod -l app=coupon --timeout=60s
               kubectl wait --for=condition=ready pod -l app=inventory --timeout=60s
@@ -219,12 +235,22 @@
               kubectl rollout status deployment -l app=inventory
               kubectl rollout status deployment -l app=pricing
               kubectl rollout status deployment -l app=membership
+    - name: Simple-demo pods ready for traffic
+      try:
+        - script:
+            timeout: 70s
+            content: |
               kubectl wait --for=condition=ready pod -l app=frontend --timeout=60s
               kubectl wait --for=condition=ready pod -l app=coupon --timeout=60s
               kubectl wait --for=condition=ready pod -l app=inventory --timeout=60s
               kubectl wait --for=condition=ready pod -l app=pricing --timeout=60s
               kubectl wait --for=condition=ready pod -l app=membership --timeout=60s
-              sleep 5
+    - name: Wait for apps auto-instrumentation
+      try:
+        - script:
+            timeout: 60s
+            content: |
+              sleep 30
     - name: Generate Traffic
       try:
         - script:
@@ -291,7 +317,7 @@
            content: |
              kubectl delete sources --all
              while true; do
-              ic_count=$(kubectl get instrumentationconfigs | wc -l)
+              ic_count=$(kubectl get instrumentationconfigs --output name | wc -l)
                if [ $ic_count -eq "0" ]; then
                  break
                fi
@@ -301,4 +327,87 @@
     try:
       - assert:
           timeout: 2m
-          file: 04-workloads.yaml
\ No newline at end of file
+          file: 04-workloads.yaml
+      - script:
+          timeout: 70s
+          content: |
+            kubectl rollout status deployment -l app=coupon
+            kubectl rollout status deployment -l app=frontend
+            kubectl rollout status deployment -l app=inventory
+            kubectl rollout status deployment -l app=pricing
+            kubectl rollout status deployment -l app=membership
+    - name: Simple-demo pods ready for traffic
+      try:
+        - script:
+            timeout: 70s
+            content: |
+              kubectl wait --for=condition=ready pod -l app=frontend --timeout=60s
+              kubectl wait --for=condition=ready pod -l app=coupon --timeout=60s
+              kubectl wait --for=condition=ready pod -l app=inventory --timeout=60s
+              kubectl wait --for=condition=ready pod -l app=pricing --timeout=60s
+              kubectl wait --for=condition=ready pod -l app=membership --timeout=60s
+    - name: Instrument frontend workload specifically
+      try:
+        - apply:
+            file: 05-source.yaml
+    - name: Assert Runtime Detected for single workload
+      try:
+        - assert:
+            timeout: 2m
+            file: 05-assert-runtime-detected.yaml
+        - assert:
+            timeout: 2m
+            file: 05-workloads.yaml
+    - name: Single workload instrumented after runtime detection
+      try:
+        - script:
+            timeout: 70s
+            content: |
+              kubectl rollout status deployment -l app=coupon
+              kubectl rollout status deployment -l app=frontend
+              kubectl rollout status deployment -l app=inventory
+              kubectl rollout status deployment -l app=pricing
+              kubectl rollout status deployment -l app=membership
+    - name: Simple-demo pods ready for traffic
+      try:
+        - script:
+            timeout: 70s
+            content: |
+              kubectl wait --for=condition=ready pod -l app=frontend --timeout=60s
+              kubectl wait --for=condition=ready pod -l app=coupon --timeout=60s
+              kubectl wait --for=condition=ready pod -l app=inventory --timeout=60s
+              kubectl wait --for=condition=ready pod -l app=pricing --timeout=60s
+              kubectl wait --for=condition=ready pod -l app=membership --timeout=60s
+    - name: Instrument rest of Namespace
+      try:
+        - apply:
+            file: 02-source-ns.yaml
+    - name: Assert Runtime Detected for all workloads
+      try:
+        - assert:
+            timeout: 2m
+            file: ../../common/assert/simple-demo-runtime-detected.yaml
+        - assert:
+            timeout: 2m
+            file: 06-workloads.yaml
+    - name: Uninstrument namespace
+      try:
+        - script:
+            timeout: 60s
+            content: |
+              kubectl delete sources/default
+              while true; do
+                ic_count=$(kubectl get instrumentationconfigs --output name | wc -l)
+                if [ $ic_count -eq "1" ]; then
+                  break
+                fi
+                sleep 5
+              done
+    - name: Assert Runtime still Detected for single workload
+      try:
+        - assert:
+            timeout: 2m
+            file: 05-assert-runtime-detected.yaml
+        - assert:
+            timeout: 2m
+            file: 07-workloads.yaml
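
For context on the rule both controllers now share: a workload counts as inheriting instrumentation from its namespace only when no Source targets it directly and it does not carry the instrumentation label itself. The sketch below is a minimal, self-contained illustration of that decision, assuming simplified stand-in types and a hard-coded label key; the real code uses the v1alpha1 Source type, workload.WorkloadKind, and consts.OdigosInstrumentationLabel.

package main

import "fmt"

// Illustrative stand-ins only; not the real odigos API types.
type workloadRef struct {
	Kind string // e.g. "Deployment"
	Name string
}

type sourceSpec struct {
	Workload workloadRef
}

// inheritsFromNamespace mirrors the decision in isInheritingInstrumentationFromNs:
// inherit only if no Source targets the workload directly and it has no
// instrumentation label of its own.
func inheritsFromNamespace(kind, name string, sources []sourceSpec, labels map[string]string) bool {
	for _, s := range sources {
		if s.Workload.Kind == kind && s.Workload.Name == name {
			return false // an explicit Source exists for this workload
		}
	}
	if labels == nil {
		return true
	}
	// "odigos-instrumentation" stands in for consts.OdigosInstrumentationLabel here.
	_, hasLabel := labels["odigos-instrumentation"]
	return !hasLabel
}

func main() {
	sources := []sourceSpec{{Workload: workloadRef{Kind: "Deployment", Name: "frontend"}}}

	// frontend has its own Source (05-source.yaml), so deleting the namespace
	// Source must leave its InstrumentationConfig in place.
	fmt.Println(inheritsFromNamespace("Deployment", "frontend", sources, nil)) // false

	// coupon has neither a Source nor a label, so it only inherits from the
	// namespace and is cleaned up when the namespace Source is deleted.
	fmt.Println(inheritsFromNamespace("Deployment", "coupon", sources, nil)) // true
}

This is the behavior the new 05/06/07 steps in chainsaw-test.yaml exercise end to end.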