.enabled=false` when installing the traffic-manager.
+The following Helm chart values disable the corresponding workload types (see the example after the list):
+
+- `workloads.deployments.enabled=false` for `Deployments`,
+- `workloads.replicaSets.enabled=false` for `ReplicaSets`,
+- `workloads.statefulSets.enabled=false` for `StatefulSets`.
+
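+For example, to make the traffic-manager ignore `ReplicaSets` (shown here with the `telepresence` CLI, which forwards Helm chart values; the same `--set` value applies if you install the chart with Helm directly):
+
+```console
+telepresence helm install --set workloads.replicaSets.enabled=false
+```
+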
### Enable ArgoRollouts
In order to use `ArgoRollouts`, you must pass the Helm chart value `workloads.argoRollouts.enabled=true` when installing the traffic-manager.
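+
+For example (again assuming the chart is managed through the `telepresence` CLI):
+
+```console
+telepresence helm install --set workloads.argoRollouts.enabled=true
+```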
diff --git a/docs/release-notes.md b/docs/release-notes.md
index 850c3befb2..c2b57e5c0d 100644
--- a/docs/release-notes.md
+++ b/docs/release-notes.md
@@ -30,6 +30,12 @@ See [Streaming Transitions from SPDY to WebSockets](https://kubernetes.io/blog/2
The OSS code-base will no longer report usage data to the proprietary collector at Ambassador Labs. The actual calls to the collector remain, but will be no-ops unless a proper collector client is installed using an extension point.
+## Add deployments, statefulSets, replicaSets to workloads Helm chart value
+
+The Helm chart value `workloads` now supports the kinds `deployments.enabled`, `statefulSets.enabled`, and `replicaSets.enabled`. By default, all three are enabled, but each can be disabled by setting the corresponding value to `false`. When a kind is disabled, the traffic-manager ignores workloads of that kind, and Telepresence will not be able to intercept them.
+
## Version 2.20.2 (October 21)
## Crash in traffic-manager configured with agentInjector.enabled=false
diff --git a/docs/release-notes.mdx b/docs/release-notes.mdx
index 2216b89545..a54156e1ed 100644
--- a/docs/release-notes.mdx
+++ b/docs/release-notes.mdx
@@ -28,6 +28,10 @@ See [Streaming Transitions from SPDY to WebSockets](https://kubernetes.io/blog/2
Make usage data collection configurable using an extension point, and default to no-ops
The OSS code-base will no longer report usage data to the proprietary collector at Ambassador Labs. The actual calls to the collector remain, but will be no-ops unless a proper collector client is installed using an extension point.
+
+ Add deployments, statefulSets, replicaSets to workloads Helm chart value
+ The Helm chart value `workloads` now supports the kinds `deployments.enabled`, `statefulSets.enabled`, and `replicaSets.enabled`. By default, all three are enabled, but each can be disabled by setting the corresponding value to `false`. When a kind is disabled, the traffic-manager ignores workloads of that kind, and Telepresence will not be able to intercept them.
+
## Version 2.20.2 (October 21)
Crash in traffic-manager configured with agentInjector.enabled=false
diff --git a/integration_test/workload_configuration_test.go b/integration_test/workload_configuration_test.go
new file mode 100644
index 0000000000..599e957ae9
--- /dev/null
+++ b/integration_test/workload_configuration_test.go
@@ -0,0 +1,135 @@
+package integration_test
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/telepresenceio/telepresence/v2/integration_test/itest"
+)
+
+type workloadConfigurationSuite struct {
+ itest.Suite
+ itest.NamespacePair
+}
+
+func (s *workloadConfigurationSuite) SuiteName() string {
+ return "WorkloadConfiguration"
+}
+
+func init() {
+ itest.AddTrafficManagerSuite("-workload-configuration", func(h itest.NamespacePair) itest.TestingSuite {
+ return &workloadConfigurationSuite{Suite: itest.Suite{Harness: h}, NamespacePair: h}
+ })
+}
+
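+// disabledWorkloadKind verifies that a workload of a kind that has been disabled in the
+// traffic-manager is neither listed nor interceptable.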
+func (s *workloadConfigurationSuite) disabledWorkloadKind(tp, wl string) {
+ ctx := s.Context()
+ require := s.Require()
+
+ s.ApplyApp(ctx, wl, strings.ToLower(tp)+"/"+wl)
+ defer s.DeleteSvcAndWorkload(ctx, strings.ToLower(tp), wl)
+
+ defer s.uninstallAgents(ctx, wl)
+
+ s.TelepresenceConnect(ctx)
+ defer itest.TelepresenceDisconnectOk(ctx)
+
+ // give it time for the workload to be detected (if it was going to be)
+ time.Sleep(6 * time.Second)
+
+ list := itest.TelepresenceOk(ctx, "list")
+ require.Equal("No Workloads (Deployments, StatefulSets, ReplicaSets, or Rollouts)", list)
+
+ _, stderr, err := itest.Telepresence(ctx, "intercept", wl)
+ require.Error(err)
+ require.Contains(stderr, fmt.Sprintf("connector.CreateIntercept: workload \"%s.%s\" not found", wl, s.NamespacePair.AppNamespace()))
+}
+
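+// uninstallAgents connects as the default user and removes the traffic-agent from the
+// given workload, so that subsequent tests start from a clean state.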
+func (s *workloadConfigurationSuite) uninstallAgents(ctx context.Context, wl string) {
+ dfltCtx := itest.WithUser(ctx, "default")
+ itest.TelepresenceOk(dfltCtx, "connect", "--namespace", s.AppNamespace(), "--manager-namespace", s.ManagerNamespace())
+ itest.TelepresenceOk(dfltCtx, "uninstall", "--agent", wl)
+ itest.TelepresenceDisconnectOk(dfltCtx)
+}
+
+func (s *workloadConfigurationSuite) Test_DisabledReplicaSet() {
+ s.TelepresenceHelmInstallOK(s.Context(), true, "--set", "workloads.replicaSets.enabled=false")
+ defer s.TelepresenceHelmInstallOK(s.Context(), true, "--set", "workloads.replicaSets.enabled=true")
+ s.disabledWorkloadKind("ReplicaSet", "rs-echo")
+}
+
+func (s *workloadConfigurationSuite) Test_DisabledStatefulSet() {
+ s.TelepresenceHelmInstallOK(s.Context(), true, "--set", "workloads.statefulSets.enabled=false")
+ defer s.TelepresenceHelmInstallOK(s.Context(), true, "--set", "workloads.statefulSets.enabled=true")
+ s.disabledWorkloadKind("StatefulSet", "ss-echo")
+}
+
+func (s *workloadConfigurationSuite) Test_InterceptsDeploymentWithDisabledReplicaSets() {
+ ctx := s.Context()
+ require := s.Require()
+
+ wl, tp := "echo-one", "Deployment"
+ s.ApplyApp(ctx, wl, strings.ToLower(tp)+"/"+wl)
+ defer s.DeleteSvcAndWorkload(ctx, strings.ToLower(tp), wl)
+
+ s.TelepresenceHelmInstallOK(ctx, true, "--set", "workloads.replicaSets.enabled=false")
+ defer s.TelepresenceHelmInstallOK(ctx, true, "--set", "workloads.replicaSets.enabled=true")
+
+ defer s.uninstallAgents(ctx, wl)
+
+ s.TelepresenceConnect(ctx)
+ defer itest.TelepresenceDisconnectOk(ctx)
+
+ require.Eventually(
+ func() bool {
+ stdout, _, err := itest.Telepresence(ctx, "list")
+ return err == nil && strings.Contains(stdout, fmt.Sprintf("%s: ready to intercept", wl))
+ },
+ 6*time.Second, // waitFor
+ 2*time.Second, // polling interval
+ )
+
+ stdout := itest.TelepresenceOk(ctx, "intercept", wl)
+ require.Contains(stdout, fmt.Sprintf("Using %s %s", tp, wl))
+
+ stdout = itest.TelepresenceOk(ctx, "list", "--intercepts")
+ require.Contains(stdout, fmt.Sprintf("%s: intercepted", wl))
+ itest.TelepresenceOk(ctx, "leave", wl)
+}
+
+func (s *workloadConfigurationSuite) Test_InterceptsReplicaSetWithDisabledDeployments() {
+ ctx := s.Context()
+ require := s.Require()
+
+ wl, tp := "echo-one", "Deployment"
+ s.ApplyApp(ctx, wl, strings.ToLower(tp)+"/"+wl)
+ defer s.DeleteSvcAndWorkload(ctx, strings.ToLower(tp), wl)
+
+ interceptableWl := s.KubectlOk(ctx, "get", "replicasets", "-l", fmt.Sprintf("app=%s", wl), "-o", "jsonpath={.items[*].metadata.name}")
+
+ s.TelepresenceHelmInstallOK(ctx, true, "--set", "workloads.deployments.enabled=false")
+ defer s.TelepresenceHelmInstallOK(ctx, true, "--set", "workloads.deployments.enabled=true")
+
+ defer s.uninstallAgents(ctx, interceptableWl)
+
+ s.TelepresenceConnect(ctx)
+ defer itest.TelepresenceDisconnectOk(ctx)
+
+ require.Eventually(
+ func() bool {
+ stdout, _, err := itest.Telepresence(ctx, "list")
+ return err == nil && strings.Contains(stdout, fmt.Sprintf("%s: ready to intercept", interceptableWl))
+ },
+ 6*time.Second, // waitFor
+ 2*time.Second, // polling interval
+ )
+
+ stdout := itest.TelepresenceOk(ctx, "intercept", interceptableWl)
+ require.Contains(stdout, fmt.Sprintf("Using %s %s", "ReplicaSet", interceptableWl))
+
+ stdout = itest.TelepresenceOk(ctx, "list", "--intercepts")
+ require.Contains(stdout, fmt.Sprintf("%s: intercepted", interceptableWl))
+ itest.TelepresenceOk(ctx, "leave", interceptableWl)
+}
diff --git a/pkg/client/userd/trafficmgr/session.go b/pkg/client/userd/trafficmgr/session.go
index d7cae2dc49..6e9daa0a20 100644
--- a/pkg/client/userd/trafficmgr/session.go
+++ b/pkg/client/userd/trafficmgr/session.go
@@ -1111,22 +1111,32 @@ func (s *session) localWorkloadsWatcher(ctx context.Context, namespace string, s
ctx = informer.WithFactory(ctx, namespace)
fc = informer.GetFactory(ctx, namespace)
}
- workload.StartDeployments(ctx, namespace)
- workload.StartReplicaSets(ctx, namespace)
- workload.StartStatefulSets(ctx, namespace)
- kf := fc.GetK8sInformerFactory()
- kf.Start(ctx.Done())
- rolloutsEnabled := slices.Index(knownWorkloadKinds.Kinds, manager.WorkloadInfo_ROLLOUT) >= 0
- if rolloutsEnabled {
- workload.StartRollouts(ctx, namespace)
- af := fc.GetArgoRolloutsInformerFactory()
- af.Start(ctx.Done())
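+	// Map each kind reported by the traffic-manager to its local workload kind and start an
+	// informer for it; kinds that are disabled in the manager's Helm values are never reported.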
+ enabledWorkloadKinds := make([]workload.WorkloadKind, len(knownWorkloadKinds.Kinds))
+ for i, kind := range knownWorkloadKinds.Kinds {
+ switch kind {
+ case manager.WorkloadInfo_DEPLOYMENT:
+ enabledWorkloadKinds[i] = workload.DeploymentWorkloadKind
+ workload.StartDeployments(ctx, namespace)
+ case manager.WorkloadInfo_REPLICASET:
+ enabledWorkloadKinds[i] = workload.ReplicaSetWorkloadKind
+ workload.StartReplicaSets(ctx, namespace)
+ case manager.WorkloadInfo_STATEFULSET:
+ enabledWorkloadKinds[i] = workload.StatefulSetWorkloadKind
+ workload.StartStatefulSets(ctx, namespace)
+ case manager.WorkloadInfo_ROLLOUT:
+ enabledWorkloadKinds[i] = workload.RolloutWorkloadKind
+ workload.StartRollouts(ctx, namespace)
+ af := fc.GetArgoRolloutsInformerFactory()
+ af.Start(ctx.Done())
+ }
}
- ww, err := workload.NewWatcher(ctx, namespace, rolloutsEnabled)
+ kf := fc.GetK8sInformerFactory()
+ kf.Start(ctx.Done())
+
+ ww, err := workload.NewWatcher(ctx, namespace, enabledWorkloadKinds)
if err != nil {
- workload.StartRollouts(ctx, namespace)
return err
}
kf.WaitForCacheSync(ctx.Done())
diff --git a/pkg/workload/watcher.go b/pkg/workload/watcher.go
index 5d8fc173cc..88e8023c0f 100644
--- a/pkg/workload/watcher.go
+++ b/pkg/workload/watcher.go
@@ -3,6 +3,7 @@ package workload
import (
"context"
"math"
+ "slices"
"sync"
"time"
@@ -34,6 +35,19 @@ type WorkloadEvent struct {
Workload k8sapi.Workload
}
+type WorkloadKind string
+
+const (
+ DeploymentWorkloadKind WorkloadKind = "Deployment"
+ StatefulSetWorkloadKind WorkloadKind = "StatefulSet"
+ ReplicaSetWorkloadKind WorkloadKind = "ReplicaSet"
+ RolloutWorkloadKind WorkloadKind = "Rollout"
+)
+
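+// IsValid reports whether the receiver is one of the workload kinds known to the watcher.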
+func (w *WorkloadKind) IsValid() bool {
+ return w != nil && slices.Contains([]WorkloadKind{DeploymentWorkloadKind, StatefulSetWorkloadKind, ReplicaSetWorkloadKind, RolloutWorkloadKind}, *w)
+}
+
func (e EventType) String() string {
switch e {
case EventTypeAdd:
@@ -53,17 +67,17 @@ type Watcher interface {
type watcher struct {
sync.Mutex
- namespace string
- subscriptions map[uuid.UUID]chan<- []WorkloadEvent
- timer *time.Timer
- events []WorkloadEvent
- rolloutsEnabled bool
+ namespace string
+ subscriptions map[uuid.UUID]chan<- []WorkloadEvent
+ timer *time.Timer
+ events []WorkloadEvent
+ enabledWorkloadKinds []WorkloadKind
}
-func NewWatcher(ctx context.Context, ns string, rolloutsEnabled bool) (Watcher, error) {
+func NewWatcher(ctx context.Context, ns string, enabledWorkloadKinds []WorkloadKind) (Watcher, error) {
w := new(watcher)
w.namespace = ns
- w.rolloutsEnabled = rolloutsEnabled
+ w.enabledWorkloadKinds = enabledWorkloadKinds
w.subscriptions = make(map[uuid.UUID]chan<- []WorkloadEvent)
w.timer = time.AfterFunc(time.Duration(math.MaxInt64), func() {
w.Lock()
@@ -92,14 +106,17 @@ func NewWatcher(ctx context.Context, ns string, rolloutsEnabled bool) (Watcher,
return w, nil
}
-func hasValidReplicasetOwner(wl k8sapi.Workload, rolloutsEnabled bool) bool {
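+// hasValidReplicasetOwner reports whether wl has a controlling owner of an enabled kind
+// (Deployment or Rollout); such workloads are skipped, since their owner is watched instead.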
+func hasValidReplicasetOwner(wl k8sapi.Workload, enabledWorkloadKinds []WorkloadKind) bool {
for _, ref := range wl.GetOwnerReferences() {
if ref.Controller != nil && *ref.Controller {
switch ref.Kind {
case "Deployment":
- return true
+ if slices.Contains(enabledWorkloadKinds, DeploymentWorkloadKind) {
+ return true
+ }
+
case "Rollout":
- if rolloutsEnabled {
+ if slices.Contains(enabledWorkloadKinds, RolloutWorkloadKind) {
return true
}
}
@@ -120,41 +137,47 @@ func (w *watcher) Subscribe(ctx context.Context) <-chan []WorkloadEvent {
kf := informer.GetFactory(ctx, w.namespace)
ai := kf.GetK8sInformerFactory().Apps().V1()
dlog.Debugf(ctx, "workload.Watcher producing initial events for namespace %s", w.namespace)
- if dps, err := ai.Deployments().Lister().Deployments(w.namespace).List(labels.Everything()); err == nil {
- for _, obj := range dps {
- if wl, ok := FromAny(obj); ok && !hasValidReplicasetOwner(wl, w.rolloutsEnabled) && !trafficManagerSelector.Matches(labels.Set(obj.Labels)) {
- initialEvents = append(initialEvents, WorkloadEvent{
- Type: EventTypeAdd,
- Workload: wl,
- })
+ if slices.Contains(w.enabledWorkloadKinds, DeploymentWorkloadKind) {
+ if dps, err := ai.Deployments().Lister().Deployments(w.namespace).List(labels.Everything()); err == nil {
+ for _, obj := range dps {
+ if wl, ok := FromAny(obj); ok && !hasValidReplicasetOwner(wl, w.enabledWorkloadKinds) && !trafficManagerSelector.Matches(labels.Set(obj.Labels)) {
+ initialEvents = append(initialEvents, WorkloadEvent{
+ Type: EventTypeAdd,
+ Workload: wl,
+ })
+ }
}
}
}
- if rps, err := ai.ReplicaSets().Lister().ReplicaSets(w.namespace).List(labels.Everything()); err == nil {
- for _, obj := range rps {
- if wl, ok := FromAny(obj); ok && !hasValidReplicasetOwner(wl, w.rolloutsEnabled) {
- initialEvents = append(initialEvents, WorkloadEvent{
- Type: EventTypeAdd,
- Workload: wl,
- })
+ if slices.Contains(w.enabledWorkloadKinds, ReplicaSetWorkloadKind) {
+ if rps, err := ai.ReplicaSets().Lister().ReplicaSets(w.namespace).List(labels.Everything()); err == nil {
+ for _, obj := range rps {
+ if wl, ok := FromAny(obj); ok && !hasValidReplicasetOwner(wl, w.enabledWorkloadKinds) {
+ initialEvents = append(initialEvents, WorkloadEvent{
+ Type: EventTypeAdd,
+ Workload: wl,
+ })
+ }
}
}
}
- if sps, err := ai.StatefulSets().Lister().StatefulSets(w.namespace).List(labels.Everything()); err == nil {
- for _, obj := range sps {
- if wl, ok := FromAny(obj); ok && !hasValidReplicasetOwner(wl, w.rolloutsEnabled) {
- initialEvents = append(initialEvents, WorkloadEvent{
- Type: EventTypeAdd,
- Workload: wl,
- })
+ if slices.Contains(w.enabledWorkloadKinds, StatefulSetWorkloadKind) {
+ if sps, err := ai.StatefulSets().Lister().StatefulSets(w.namespace).List(labels.Everything()); err == nil {
+ for _, obj := range sps {
+ if wl, ok := FromAny(obj); ok && !hasValidReplicasetOwner(wl, w.enabledWorkloadKinds) {
+ initialEvents = append(initialEvents, WorkloadEvent{
+ Type: EventTypeAdd,
+ Workload: wl,
+ })
+ }
}
}
}
- if w.rolloutsEnabled {
+ if slices.Contains(w.enabledWorkloadKinds, RolloutWorkloadKind) {
ri := kf.GetArgoRolloutsInformerFactory().Argoproj().V1alpha1()
if sps, err := ri.Rollouts().Lister().Rollouts(w.namespace).List(labels.Everything()); err == nil {
for _, obj := range sps {
- if wl, ok := FromAny(obj); ok && !hasValidReplicasetOwner(wl, w.rolloutsEnabled) {
+ if wl, ok := FromAny(obj); ok && !hasValidReplicasetOwner(wl, w.enabledWorkloadKinds) {
initialEvents = append(initialEvents, WorkloadEvent{
Type: EventTypeAdd,
Workload: wl,
@@ -244,24 +267,27 @@ func (w *watcher) watch(ix cache.SharedIndexInformer, ns string, hasValidControl
func (w *watcher) addEventHandler(ctx context.Context, ns string) error {
kf := informer.GetFactory(ctx, ns)
hvc := func(wl k8sapi.Workload) bool {
- return hasValidReplicasetOwner(wl, w.rolloutsEnabled)
+ return hasValidReplicasetOwner(wl, w.enabledWorkloadKinds)
}
ai := kf.GetK8sInformerFactory().Apps().V1()
- if err := w.watch(ai.Deployments().Informer(), ns, hvc); err != nil {
- return err
- }
- if err := w.watch(ai.ReplicaSets().Informer(), ns, hvc); err != nil {
- return err
- }
- if err := w.watch(ai.StatefulSets().Informer(), ns, hvc); err != nil {
- return err
- }
- if !w.rolloutsEnabled {
- dlog.Infof(ctx, "Argo Rollouts is disabled, Argo Rollouts will not be watched")
- } else {
- ri := kf.GetArgoRolloutsInformerFactory().Argoproj().V1alpha1()
- if err := w.watch(ri.Rollouts().Informer(), ns, hvc); err != nil {
+ for _, wlKind := range w.enabledWorkloadKinds {
+ var ssi cache.SharedIndexInformer
+ switch wlKind {
+ case DeploymentWorkloadKind:
+ ssi = ai.Deployments().Informer()
+ case ReplicaSetWorkloadKind:
+ ssi = ai.ReplicaSets().Informer()
+ case StatefulSetWorkloadKind:
+ ssi = ai.StatefulSets().Informer()
+ case RolloutWorkloadKind:
+ ri := kf.GetArgoRolloutsInformerFactory().Argoproj().V1alpha1()
+ ssi = ri.Rollouts().Informer()
+ default:
+ continue
+ }
+
+ if err := w.watch(ssi, ns, hvc); err != nil {
return err
}
}
diff --git a/rpc/manager/manager.proto b/rpc/manager/manager.proto
index 61a382d6a7..40fbd38397 100644
--- a/rpc/manager/manager.proto
+++ b/rpc/manager/manager.proto
@@ -779,8 +779,8 @@ service Manager {
rpc ReviewIntercept(ReviewInterceptRequest) returns (google.protobuf.Empty);
// GetKnownWorkloadKinds returns the known workload kinds
- // that the manager can handle. This base set should always include Deployment, StatefulSet, and ReplicaSet,
- // and it may include Rollout (Argo Rollouts) if the support for it is enabled.
+  // that the manager can handle. This set may include Deployment, StatefulSet, ReplicaSet, and
+  // Rollout (Argo Rollouts), depending on which workload kinds are enabled in the manager's Helm values.
rpc GetKnownWorkloadKinds(SessionInfo) returns (KnownWorkloadKinds);
// LookupDNS performs a DNS lookup in the cluster. If the caller has intercepts
diff --git a/rpc/manager/manager_grpc.pb.go b/rpc/manager/manager_grpc.pb.go
index ca64eaf312..cfeb97d124 100644
--- a/rpc/manager/manager_grpc.pb.go
+++ b/rpc/manager/manager_grpc.pb.go
@@ -142,8 +142,8 @@ type ManagerClient interface {
// error, and setting a human-readable status message.
ReviewIntercept(ctx context.Context, in *ReviewInterceptRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// GetKnownWorkloadKinds returns the known workload kinds
- // that the manager can handle. This base set should always include Deployment, StatefulSet, and ReplicaSet,
- // and it may include Rollout (Argo Rollouts) if the support for it is enabled.
+	// that the manager can handle. This set may include Deployment, StatefulSet, ReplicaSet, and
+	// Rollout (Argo Rollouts), depending on which workload kinds are enabled in the manager's Helm values.
GetKnownWorkloadKinds(ctx context.Context, in *SessionInfo, opts ...grpc.CallOption) (*KnownWorkloadKinds, error)
// LookupDNS performs a DNS lookup in the cluster. If the caller has intercepts
// active, the lookup will be performed from the intercepted pods.
@@ -834,8 +834,8 @@ type ManagerServer interface {
// error, and setting a human-readable status message.
ReviewIntercept(context.Context, *ReviewInterceptRequest) (*emptypb.Empty, error)
// GetKnownWorkloadKinds returns the known workload kinds
- // that the manager can handle. This base set should always include Deployment, StatefulSet, and ReplicaSet,
- // and it may include Rollout (Argo Rollouts) if the support for it is enabled.
+	// that the manager can handle. This set may include Deployment, StatefulSet, ReplicaSet, and
+	// Rollout (Argo Rollouts), depending on which workload kinds are enabled in the manager's Helm values.
GetKnownWorkloadKinds(context.Context, *SessionInfo) (*KnownWorkloadKinds, error)
// LookupDNS performs a DNS lookup in the cluster. If the caller has intercepts
// active, the lookup will be performed from the intercepted pods.