diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 16506e8b7a..a59e67bf30 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -148,9 +148,9 @@ jobs:
- uses: actions/checkout@v4
- if: needs.publish-release.semver_check.outputs.make_latest
+ if: steps.semver_check.outputs.make_latest == true
- name: Update Homebrew
- if: needs.publish-release.semver_check.outputs.make_latest
+ if: steps.semver_check.outputs.make_latest == true
run: |
v=${{ github.ref_name }}
packaging/homebrew-package.sh "${v#v}" tel2oss "${{ vars.GH_BOT_USER }}" "${{ vars.GH_BOT_EMAIL }}" "${{ secrets.HOMEBREW_TAP_TOKEN }}"
diff --git a/CHANGELOG.yml b/CHANGELOG.yml
index 47dcae5324..2a05fa5da1 100644
--- a/CHANGELOG.yml
+++ b/CHANGELOG.yml
@@ -33,6 +33,16 @@ docDescription: >-
environments, access to instantaneous feedback loops, and highly
customizable development environments.
items:
+ - version: 2.21.2
+ date: (TBD)
+ notes:
+ - type: bugfix
+ title: Fix panic when agentpf.client creates a Tunnel
+ body: >-
A race could occur where several requests were made to `agentpf.client.Tunnel` on a client that had errored
when creating its port-forward to the agent. The implementation could handle one such request but not
several, resulting in a panic when multiple simultaneous requests were made to the same client
within a very short time period.
- version: 2.21.1
date: 2024-12-17
notes:
diff --git a/docs/release-notes.md b/docs/release-notes.md
index 874cebc7f0..c2adbe31e2 100644
--- a/docs/release-notes.md
+++ b/docs/release-notes.md
@@ -1,6 +1,13 @@
[comment]: # (Code generated by relnotesgen. DO NOT EDIT.)
# Telepresence Release Notes
+## Version 2.21.2
+## Fix panic when agentpf.client creates a Tunnel
+
+
A race could occur where several requests were made to `agentpf.client.Tunnel` on a client that had errored when creating its port-forward to the agent. The implementation could handle one such request but not several, resulting in a panic when multiple simultaneous requests were made to the same client within a very short time period.
+
+
## Version 2.21.1 (December 17)
## [Allow ingest of serverless deployments without specifying an inject-container-ports annotation](https://github.com/telepresenceio/telepresence/issues/3741)
diff --git a/docs/release-notes.mdx b/docs/release-notes.mdx
index 45fa434b15..525d3bf628 100644
--- a/docs/release-notes.mdx
+++ b/docs/release-notes.mdx
@@ -7,6 +7,11 @@ import { Note, Title, Body } from '@site/src/components/ReleaseNotes'
[comment]: # (Code generated by relnotesgen. DO NOT EDIT.)
# Telepresence Release Notes
+## Version 2.21.2
+
+ Fix panic when agentpf.client creates a Tunnel
 A race could occur where several requests were made to `agentpf.client.Tunnel` on a client that had errored when creating its port-forward to the agent. The implementation could handle one such request but not several, resulting in a panic when multiple simultaneous requests were made to the same client within a very short time period.
+
## Version 2.21.1 (December 17)
Allow ingest of serverless deployments without specifying an inject-container-ports annotation
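Before the Go changes below, it helps to see the failure mode the notes above describe. Prior to this fix, `connect` sent its error once on an unbuffered `ready` channel and then closed it, so only the first `Tunnel` caller received the error; every later caller found a closed channel, took the "ready" path, and went on to use a client that was never connected. A minimal, runnable sketch of that behavior (simplified, hypothetical names, not the actual telepresence code):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	ready := make(chan error) // unbuffered, as before the fix
	var cli *string           // stands in for the agent client; stays nil on error

	// Old connect() behavior: send the error to whoever is listening, then close.
	go func() {
		ready <- errors.New("port-forward failed")
		close(ready)
	}()

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if err, ok := <-ready; ok {
				fmt.Printf("caller %d got error: %v\n", id, err)
				return
			}
			// A closed channel looks like "connected", but cli is nil here,
			// which is consistent with the panic described in the changelog.
			fmt.Printf("caller %d proceeds with cli=%v\n", id, cli)
		}(i)
	}
	wg.Wait()
}
```

Run it and exactly one goroutine reports the error while the others proceed with a nil client.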
diff --git a/pkg/client/agentpf/clients.go b/pkg/client/agentpf/clients.go
index d601d1621f..812e9d5b83 100644
--- a/pkg/client/agentpf/clients.go
+++ b/pkg/client/agentpf/clients.go
@@ -53,6 +53,8 @@ func (ac *client) Tunnel(ctx context.Context, opts ...grpc.CallOption) (tunnel.C
select {
case err, ok := <-ac.ready:
if ok {
+ // Put the error back on the channel in case Tunnel is called again before this client is deleted.
+ ac.ready <- err
return nil, err
}
// ready channel is closed. We are ready to go.
@@ -74,14 +76,24 @@ func (ac *client) Tunnel(ctx context.Context, opts ...grpc.CallOption) (tunnel.C
}
func (ac *client) connect(ctx context.Context, deleteMe func()) {
- defer close(ac.ready)
dialCtx, dialCancel := context.WithTimeout(ctx, 5*time.Second)
defer dialCancel()
- conn, cli, _, err := k8sclient.ConnectToAgent(dialCtx, ac.info.PodName, ac.info.Namespace, uint16(ac.info.ApiPort))
+ var err error
+ defer func() {
+ if err == nil {
+ close(ac.ready)
+ } else {
+ deleteMe()
+ ac.ready <- err
+ }
+ }()
+
+ var conn *grpc.ClientConn
+ var cli agent.AgentClient
+
+ conn, cli, _, err = k8sclient.ConnectToAgent(dialCtx, ac.info.PodName, ac.info.Namespace, uint16(ac.info.ApiPort))
if err != nil {
- deleteMe()
- ac.ready <- err
return
}
@@ -94,10 +106,7 @@ func (ac *client) connect(ctx context.Context, deleteMe func()) {
intercepted := ac.info.Intercepted
ac.Unlock()
if intercepted {
- if err = ac.startDialWatcherReady(ctx); err != nil {
- deleteMe()
- ac.ready <- err
- }
+ err = ac.startDialWatcherReady(ctx)
}
}
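The restructured `connect` above follows a common Go idiom: assign every failure to a single shared `err` and let one deferred closure decide how to signal completion, instead of duplicating `deleteMe()` and the channel send on each error path. A standalone sketch of the idiom (hypothetical names, not the actual telepresence code):

```go
package main

import (
	"errors"
	"fmt"
)

// setup runs several fallible steps; one deferred closure inspects the
// final value of err and signals the outcome exactly once.
func setup(ready chan error) {
	var err error
	defer func() {
		if err == nil {
			close(ready) // success: every receiver sees ok == false
		} else {
			ready <- err // failure: needs a buffered channel so this never blocks
		}
	}()

	if err = dialAgent(); err != nil {
		return // signalling is handled by the defer
	}
	err = startWatcher()
}

func dialAgent() error    { return errors.New("port-forward failed") }
func startWatcher() error { return nil }

func main() {
	ready := make(chan error, 1) // capacity 1, as in the fix
	setup(ready)
	if err, ok := <-ready; ok {
		fmt.Println("setup failed:", err)
	} else {
		fmt.Println("ready")
	}
}
```

The one-slot buffer matters: the failure send happens inside a defer, where no receiver is guaranteed to be waiting, so it must never block.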
@@ -495,7 +504,7 @@ func (s *clients) updateClients(ctx context.Context, ais []*manager.AgentPodInfo
return oldValue, false
}
ac := &client{
- ready: make(chan error),
+ ready: make(chan error, 1),
session: s.session,
info: ai,
}
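Taken together, the buffered `ready` channel and the put-back in `Tunnel` mean that every concurrent caller racing against a failed `connect` observes the same error instead of a closed channel. A minimal sketch of the resulting pattern (hypothetical names):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type client struct {
	ready chan error
}

// tunnel mirrors the fixed Tunnel: on error, put the value back so any
// caller racing in before the client is deleted sees the same error.
func (c *client) tunnel() error {
	if err, ok := <-c.ready; ok {
		c.ready <- err // put it back for the next caller
		return err
	}
	return nil // channel closed: connect succeeded
}

func main() {
	c := &client{ready: make(chan error, 1)} // capacity 1, as in the fix
	c.ready <- errors.New("port-forward failed")

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			fmt.Printf("caller %d: %v\n", id, c.tunnel())
		}(i)
	}
	wg.Wait()
}
```

All three callers print the port-forward error; before the fix, at most one of them would have.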