From 8c4cd2eafde8d611fd123a3212e10a94da51a28a Mon Sep 17 00:00:00 2001
From: 7h3-3mp7y-m4n <115151332+7h3-3mp7y-m4n@users.noreply.github.com>
Date: Mon, 10 Jun 2024 19:35:55 +0530
Subject: [PATCH 01/33] =?UTF-8?q?Fix:=20Parallels=20updates=20subscription?=
=?UTF-8?q?=20where=20possible,=20instead=20of=20recrea=E2=80=A6=20(#7965)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* Fix: Parallels updates subscription where possible, instead of recreating it
* Fix: removing not imported pkg
---
pkg/reconciler/parallel/parallel.go | 11 +++++++++--
pkg/reconciler/parallel/parallel_test.go | 21 ++++++++++++---------
2 files changed, 21 insertions(+), 11 deletions(-)
diff --git a/pkg/reconciler/parallel/parallel.go b/pkg/reconciler/parallel/parallel.go
index 26f3521b03c..1e6d674a6a2 100644
--- a/pkg/reconciler/parallel/parallel.go
+++ b/pkg/reconciler/parallel/parallel.go
@@ -22,7 +22,6 @@ import (
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/equality"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -45,6 +44,7 @@ import (
messaginglisters "knative.dev/eventing/pkg/client/listers/messaging/v1"
ducklib "knative.dev/eventing/pkg/duck"
"knative.dev/eventing/pkg/reconciler/parallel/resources"
+ "knative.dev/pkg/kmp"
)
type Reconciler struct {
@@ -216,7 +216,7 @@ func (r *Reconciler) reconcileSubscription(ctx context.Context, branchNumber int
// TODO: Send events here, or elsewhere?
//r.Recorder.Eventf(p, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Parallels's subscription failed: %v", err)
return nil, fmt.Errorf("failed to get Subscription: %s", err)
- } else if !equality.Semantic.DeepDerivative(expected.Spec, sub.Spec) {
+ } else if immutableFieldsChanged := expected.CheckImmutableFields(ctx, sub); immutableFieldsChanged != nil {
// Given that spec.channel is immutable, we cannot just update the subscription. We delete
// it instead, and re-create it.
err = r.eventingClientSet.MessagingV1().Subscriptions(sub.Namespace).Delete(ctx, sub.Name, metav1.DeleteOptions{})
@@ -230,6 +230,13 @@ func (r *Reconciler) reconcileSubscription(ctx context.Context, branchNumber int
return nil, err
}
return newSub, nil
+ } else if equal, err := kmp.SafeEqual(sub.Spec, expected.Spec); !equal || err != nil {
+ updatedSub, err := r.eventingClientSet.MessagingV1().Subscriptions(sub.Namespace).Update(ctx, expected, metav1.UpdateOptions{})
+ if err != nil {
+ logging.FromContext(ctx).Infow("Cannot update subscription", zap.Error(err))
+ return nil, err
+ }
+ return updatedSub, nil
}
return sub, nil
}
diff --git a/pkg/reconciler/parallel/parallel_test.go b/pkg/reconciler/parallel/parallel_test.go
index 54dbec0d76f..8a3dbee44b8 100644
--- a/pkg/reconciler/parallel/parallel_test.go
+++ b/pkg/reconciler/parallel/parallel_test.go
@@ -427,22 +427,25 @@ func TestAllBranches(t *testing.T) {
{Subscriber: createSubscriber(0)},
})))},
WantErr: false,
- WantDeletes: []clientgotesting.DeleteActionImpl{{
- ActionImpl: clientgotesting.ActionImpl{
- Namespace: testNS,
- Resource: v1.SchemeGroupVersion.WithResource("subscriptions"),
+ WantUpdates: []clientgotesting.UpdateActionImpl{
+ {
+ ActionImpl: clientgotesting.ActionImpl{
+ Namespace: testNS,
+ Resource: v1.SchemeGroupVersion.WithResource("subscriptions"),
+ },
+ Object: resources.NewSubscription(0, NewFlowsParallel(parallelName, testNS,
+ WithFlowsParallelChannelTemplateSpec(imc),
+ WithFlowsParallelBranches([]v1.ParallelBranch{
+ {Subscriber: createSubscriber(1)},
+ }))),
},
- Name: resources.ParallelBranchChannelName(parallelName, 0),
- }},
+ },
WantCreates: []runtime.Object{
createChannel(parallelName),
createBranchChannel(parallelName, 0),
resources.NewFilterSubscription(0, NewFlowsParallel(parallelName, testNS, WithFlowsParallelChannelTemplateSpec(imc), WithFlowsParallelBranches([]v1.ParallelBranch{
{Subscriber: createSubscriber(1)},
}))),
- resources.NewSubscription(0, NewFlowsParallel(parallelName, testNS, WithFlowsParallelChannelTemplateSpec(imc), WithFlowsParallelBranches([]v1.ParallelBranch{
- {Subscriber: createSubscriber(1)},
- }))),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewFlowsParallel(parallelName, testNS,
From 3af0fac92d713cf3cc0c869398ea01b031681340 Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Mon, 10 Jun 2024 17:30:31 +0200
Subject: [PATCH 02/33] Exclude core deployments from SinkBinding webhook
injection (#7986)
Exclude core deployments from webhook injection
Signed-off-by: Pierangelo Di Pilato
---
config/brokers/mt-channel-broker/deployments/broker-filter.yaml | 1 +
config/brokers/mt-channel-broker/deployments/broker-ingress.yaml | 1 +
config/brokers/mt-channel-broker/deployments/controller.yaml | 1 +
config/channels/in-memory-channel/deployments/controller.yaml | 1 +
config/channels/in-memory-channel/deployments/dispatcher.yaml | 1 +
config/core/deployments/controller.yaml | 1 +
config/core/deployments/pingsource-mt-adapter.yaml | 1 +
config/core/deployments/webhook.yaml | 1 +
8 files changed, 8 insertions(+)
diff --git a/config/brokers/mt-channel-broker/deployments/broker-filter.yaml b/config/brokers/mt-channel-broker/deployments/broker-filter.yaml
index 282c814839c..f208d9afa41 100644
--- a/config/brokers/mt-channel-broker/deployments/broker-filter.yaml
+++ b/config/brokers/mt-channel-broker/deployments/broker-filter.yaml
@@ -21,6 +21,7 @@ metadata:
app.kubernetes.io/component: broker-filter
app.kubernetes.io/version: devel
app.kubernetes.io/name: knative-eventing
+ bindings.knative.dev/exclude: "true"
spec:
selector:
matchLabels:
diff --git a/config/brokers/mt-channel-broker/deployments/broker-ingress.yaml b/config/brokers/mt-channel-broker/deployments/broker-ingress.yaml
index 527bca86830..2fec3bdf2ba 100644
--- a/config/brokers/mt-channel-broker/deployments/broker-ingress.yaml
+++ b/config/brokers/mt-channel-broker/deployments/broker-ingress.yaml
@@ -21,6 +21,7 @@ metadata:
app.kubernetes.io/component: broker-ingress
app.kubernetes.io/version: devel
app.kubernetes.io/name: knative-eventing
+ bindings.knative.dev/exclude: "true"
spec:
selector:
matchLabels:
diff --git a/config/brokers/mt-channel-broker/deployments/controller.yaml b/config/brokers/mt-channel-broker/deployments/controller.yaml
index 488f7fd2a83..10d35e44185 100644
--- a/config/brokers/mt-channel-broker/deployments/controller.yaml
+++ b/config/brokers/mt-channel-broker/deployments/controller.yaml
@@ -21,6 +21,7 @@ metadata:
app.kubernetes.io/component: mt-broker-controller
app.kubernetes.io/version: devel
app.kubernetes.io/name: knative-eventing
+ bindings.knative.dev/exclude: "true"
spec:
selector:
matchLabels:
diff --git a/config/channels/in-memory-channel/deployments/controller.yaml b/config/channels/in-memory-channel/deployments/controller.yaml
index 19c1e1e9bb9..08ff23ad9ea 100644
--- a/config/channels/in-memory-channel/deployments/controller.yaml
+++ b/config/channels/in-memory-channel/deployments/controller.yaml
@@ -22,6 +22,7 @@ metadata:
app.kubernetes.io/component: imc-controller
app.kubernetes.io/version: devel
app.kubernetes.io/name: knative-eventing
+ bindings.knative.dev/exclude: "true"
spec:
selector:
matchLabels:
diff --git a/config/channels/in-memory-channel/deployments/dispatcher.yaml b/config/channels/in-memory-channel/deployments/dispatcher.yaml
index 114dbbfaa52..f0eb20d16cc 100644
--- a/config/channels/in-memory-channel/deployments/dispatcher.yaml
+++ b/config/channels/in-memory-channel/deployments/dispatcher.yaml
@@ -22,6 +22,7 @@ metadata:
app.kubernetes.io/component: imc-dispatcher
app.kubernetes.io/version: devel
app.kubernetes.io/name: knative-eventing
+ bindings.knative.dev/exclude: "true"
spec:
selector:
matchLabels:
diff --git a/config/core/deployments/controller.yaml b/config/core/deployments/controller.yaml
index dc7602ef54c..77ff138b53c 100644
--- a/config/core/deployments/controller.yaml
+++ b/config/core/deployments/controller.yaml
@@ -22,6 +22,7 @@ metadata:
app.kubernetes.io/component: eventing-controller
app.kubernetes.io/version: devel
app.kubernetes.io/name: knative-eventing
+ bindings.knative.dev/exclude: "true"
spec:
selector:
matchLabels:
diff --git a/config/core/deployments/pingsource-mt-adapter.yaml b/config/core/deployments/pingsource-mt-adapter.yaml
index 09c4c5e8cf6..33469bdf2d8 100644
--- a/config/core/deployments/pingsource-mt-adapter.yaml
+++ b/config/core/deployments/pingsource-mt-adapter.yaml
@@ -21,6 +21,7 @@ metadata:
app.kubernetes.io/component: pingsource-mt-adapter
app.kubernetes.io/version: devel
app.kubernetes.io/name: knative-eventing
+ bindings.knative.dev/exclude: "true"
spec:
# when set to 0 (and only 0) will be set to 1 when the first PingSource is created.
replicas: 0
diff --git a/config/core/deployments/webhook.yaml b/config/core/deployments/webhook.yaml
index 8b8cd2d3292..e7654254f46 100644
--- a/config/core/deployments/webhook.yaml
+++ b/config/core/deployments/webhook.yaml
@@ -21,6 +21,7 @@ metadata:
app.kubernetes.io/component: eventing-webhook
app.kubernetes.io/version: devel
app.kubernetes.io/name: knative-eventing
+ bindings.knative.dev/exclude: "true"
spec:
selector:
matchLabels:
From d60149dccfb95947fa594aeb4cf3f19e3988ad3b Mon Sep 17 00:00:00 2001
From: Knative Automation
Date: Mon, 10 Jun 2024 11:30:38 -0400
Subject: [PATCH 03/33] [main] Upgrade to latest dependencies (#7987)
upgrade to latest dependencies
bumping google.golang.org/genproto c3f9821...5315273:
> 5315273 chore(all): update all (# 1131)
> a332354 chore(all): auto-regenerate .pb.go files (# 1130)
> d264139 chore(all): update all (# 1128)
> dc85e6b chore(all): auto-regenerate .pb.go files (# 1129)
> fc5f0ca chore: min Go version to 1.20 (# 1127)
> 0867130 chore(all): auto-regenerate .pb.go files (# 1125)
> 024948a chore(all): update all (# 1126)
> 6275950 chore(all): auto-regenerate .pb.go files (# 1124)
> b8a5c65 chore(all): update all (# 1123)
> 8cf5692 chore(all): update all (# 1122)
> 8c6c420 chore(all): update all to 7cd4c1c (# 1111)
> 79826c8 chore(all): auto-regenerate .pb.go files (# 1112)
> 7cd4c1c chore(all): auto-regenerate .pb.go files (# 1110)
> 26222e5 chore(all): update all (# 1109)
> 6f501c4 chore(all): auto-regenerate .pb.go files (# 1107)
bumping golang.org/x/term 46c790f...5f0bb72:
> 5f0bb72 go.mod: update golang.org/x dependencies
bumping google.golang.org/genproto/googleapis/api 0867130...d264139:
> d264139 chore(all): update all (# 1128)
> dc85e6b chore(all): auto-regenerate .pb.go files (# 1129)
> fc5f0ca chore: min Go version to 1.20 (# 1127)
bumping github.com/prometheus/client_model d56cd79...571429e:
> 571429e Merge pull request # 86 from prometheus/repo_sync
> 6fe5007 Merge pull request # 85 from prometheus/repo_sync
> cc727ab Update common Prometheus files
> 64c33c9 Merge pull request # 84 from prometheus/dependabot/go_modules/google.golang.org/protobuf-1.33.0
> bce87c1 Update common Prometheus files
> 5c25993 Merge pull request # 82 from prometheus/repo_sync
> d954a8a Bump google.golang.org/protobuf from 1.32.0 to 1.33.0
> 01ca24c Merge pull request # 81 from prometheus/repo_sync
> bb45f95 Update common Prometheus files
> ccd6823 Update common Prometheus files
bumping golang.org/x/tools cc29c91...bc6931d:
> bc6931d go.mod: update golang.org/x dependencies
> 1767b14 go/ssa: remove code with no effect
> 2088083 internal/gocommand: add more debug info for hanging go commands
> f1a3b12 internal/imports: FixImports should be cancellable
> 4478db0 go/analysis/passes/copylock: suppress error in ill-typed code
> 018d3b2 gopls: warn about Go 1.19 and Go 1.20
> 58cc8a4 gopls/internal/filecache: suppress gc in tests
> b623539 gopls/internal/cache: suppress "internal" import check on Bazel
> 1e9d12d go/packages: pass -overlay to all 'go list' invocations
> 3c293ad internal/cache: invalidate broken imports when package files change
> 5eff1ee gopls/internal/cache: check viewMap before altering views
> da9cad4 go/packages: avoid unnecessary "realpath" on pwd
> 71b7fa9 go/callgraph/vta: save some heap allocations in the trie implementation
> 2f8e378 go/callgraph/vta: remove graph successors method
> 624dbd0 go/analysis/passes/stringintconv: post gotypesalias=1 tweak
> 4669dc7 gopls/internal/test/marker: simplify seedCache file
> 6887e99 gopls/internal/cache: use a better view in viewOfLocked
> bd624fd gopls: make tests tolerant of new go/types error format
> 2e977dd internal/drivertest: evaluate symlink before calling packages.Load
> 8d54ca1 gopls/internal/test/marker: seed the cache before running tests
> 01018ba Revert "gopls/internal/settings: enable semantic tokens by default"
> 019da39 gopls/internal/golang: OutgoingCalls: fix crash on unsafe.Slice
> 30c880d gopls/internal/cache: improve missing import error message
> d017f4a go/packages/internal/drivertest: a package for a fake go/packages driver
> e229045 go/callgraph/vta: avoid some temporary data structures using callbacks
> 0215a5b go/packages: document fields that are part of JSON schema
> f10a0f1 gopls/internal/golang: skip TestFreeRefs on js
> d940b33 gopls/internal/server: support InsertReplaceEdit completion
> e635bfa gopls/internal/golang: unexport more declarations
> 7045d2e go/analysis/passes/nilness: fix bug with MakeInterface(TypeParam)
> e1b14a1 gopls/internal/server: avoid VS Code lightbulb
> 34db5bc gopls: initial support for godebug directive in go.mod and go.work
> 56f50e3 gopls/doc: split codelenses out of settings
> 3629652 gopls/internal/analysis/simplifyrange: suppress on range-over-func
> fb52877 all: sync golang.org/x/telemetry@bda5523
> 4646dbf gopls/internal/protocol: customize InsertReplaceEdit JSON unmarshal
> bc5e086 gopls/internal/golang: unexport several functions
> 32cec11 gopls/internal/test/integration: fix race in TestGCDetails_Toggle
> c3aae99 gopls/doc: tidy up analyzer documentation
> 41211c8 gopls/internal/golang: fix bug in freeRefs algorithm
> 788d39e gopls/internal/golang: "Show free symbols" code action
> f73683e gopls/internal/golang: remove test debugging aix-ppc64 issue
> 0b4dca1 gopls/internal/protocol: separate CodeLens from Command; document
> 8cf8c6f internal/test/integration: materialize startedWork and completedWork
> de1032b gopls: remove dead code
> 499663e all: fix function names in comment
> c184dd7 internal/test/marker: skip basiclit.txt on ppc64
> 1f300c9 gopls: upgrade x/telemetry to pick up CL 586195
> fd7deae gopls/internal/test/marker: fix analyzers.txt test that requires cgo
> af36634 gopls/internal/protocol: rename DocumentChange{s,}
> 42d564a gopls: support four kinds of DocumentChanges
> b92578a x/tools: update to x/telemetry@9ff3ad9
> 987af8b x/tools: update to x/telemetry@ac8fed8
> 069435c gopls/internal/cache: use 1 not 0 for missing line/col info
> 528484d gopls/internal/cache: support overlays
> 2e17129 gopls/doc/generate: add link anchors to each setting
> ab7bc6c gopls: further minor generator simplifications
> d40dfd5 gopls: upload from telemetry.Start, rather than upload.Run
> e8808ed gopls: upgrade x/telemetry to latest
> 0006edc go/ssa: support range-over-func
> 59d9797 gopls/internal/settings: annotate TestVetSuite with NeedsTool("go")
> 487737a gopls/internal/golang: fix another crash in RenderPackageDoc
> 4cfd180 gopls/internal/golang: RenderPackageDoc: fix param truncation crash
> 3e9beb6 gopls/doc/release: add release notes for struct tag hover info
> 24f3b32 gopls/internal/golang: show struct tag when hovering over fields
> 9795fac gopls/internal/server: discard non-file scheme workspace folder URIs
> 3b13d03 gopls/internal/cache: fix bug.Report converting Diagnostic positions
> 7f3a258 gopls/internal/test/integration/misc: disable another staticcheck test
> 8483344 gopls/internal/settings: add framepointer,sigchanyzer analyzers
> 8f9d159 gopls/internal/test/integration/misc: disable staticcheck test
> f38ac9b gopls/internal/test: avoid std assumptions in range-over-func test
> e35e4cc go/ssa: compile range-over-func to panic
> e149e84 gopls: rationalize code generation
> ff28778 gopls/internal/protocol: rationalize edit helpers
> 5daf157 gopls/internal/golang: simplify "rewrite" code actions
> 1718e2d gopls/internal/cache: simplify Snapshot Go commands
> a432b16 gopls/internal/analysis: disable ssa/ir analyzers on range-over-func
> b426bc7 go/packages/packagestest: reflect new modules.txt requirements
> a943a14 go/analysis/passes/directive: do not report adjoining //go:debug
> b020bdb go/callgraph/vta: add type alias test
bumping knative.dev/reconciler-test 1f340aa...2023469:
> 2023469 Update community files (# 735)
> c3147c1 upgrade to latest dependencies (# 734)
bumping golang.org/x/net d27919b...66e838c:
> 66e838c go.mod: update golang.org/x dependencies
> 6249541 http2: avoid race in server handler SetReadDeadine/SetWriteDeadline
> 603e3e6 quic: disable X25519Kyber768Draft00 in tests
> 67e8d0c http2: report an error if goroutines outlive serverTester tests
> 5608279 http2: avoid corruption in priority write scheduler
> 0d515a5 http2: factor out frame read/write test functions
> 9f5b79b http2: drop unused retry function
> 03c24c2 http2: use synthetic time in server tests
> 022530c http2: add a more full-featured test net.Conn
> 410d19e http2: avoid racy access to clientStream.requestedGzip
> 332fe23 http2: remove spec coverage test
> c1f5833 all: replace deprecated io/ioutil calls
> 9545aea http2: clearer distinction between test server types
> b1ec120 http2: use implicit synchronization in tests
> 49bf2d7 proxy: use strings.TrimSuffix
> c87a5b6 http2: set up the timer of closing idle connection after the initialization
> 8aa6dbf http2: cancel handler context on stream errors
> 2c14f51 http2: drop the gate type
> ac99879 webdav: return 409 for PUT without parent collection
> 7fa635b http2: avoid panic on h2c upgrade failure
Signed-off-by: Knative Automation
---
go.mod | 38 +-
go.sum | 76 ++--
vendor/cloud.google.com/go/auth/CHANGES.md | 14 +
.../go/auth/credentials/filetypes.go | 2 +
.../go/auth/grpctransport/grpctransport.go | 2 +-
.../go/auth/httptransport/httptransport.go | 2 +-
.../internal/transport/cert/workload_cert.go | 117 +++++++
vendor/golang.org/x/mod/module/module.go | 2 +
vendor/golang.org/x/net/http2/http2.go | 19 +-
vendor/golang.org/x/net/http2/server.go | 94 +++--
vendor/golang.org/x/net/http2/testsync.go | 331 ------------------
vendor/golang.org/x/net/http2/timer.go | 20 ++
vendor/golang.org/x/net/http2/transport.go | 310 ++++++----------
.../x/net/http2/writesched_priority.go | 4 +-
vendor/golang.org/x/net/proxy/per_host.go | 8 +-
vendor/golang.org/x/oauth2/google/google.go | 5 +-
vendor/golang.org/x/sys/unix/mkerrors.sh | 2 +
vendor/golang.org/x/sys/unix/zerrors_linux.go | 20 +-
.../x/sys/unix/zerrors_linux_386.go | 1 +
.../x/sys/unix/zerrors_linux_amd64.go | 1 +
.../x/sys/unix/zerrors_linux_arm64.go | 1 +
vendor/golang.org/x/sys/unix/ztypes_linux.go | 37 +-
.../x/sys/windows/security_windows.go | 1 +
.../x/sys/windows/zsyscall_windows.go | 9 +
.../x/tools/internal/gocommand/invoke.go | 96 ++++-
.../x/tools/internal/imports/fix.go | 28 +-
.../google.golang.org/api/internal/version.go | 2 +-
.../genproto/googleapis/type/date/date.pb.go | 4 +-
.../genproto/googleapis/type/expr/expr.pb.go | 4 +-
vendor/modules.txt | 42 +--
30 files changed, 607 insertions(+), 685 deletions(-)
create mode 100644 vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
delete mode 100644 vendor/golang.org/x/net/http2/testsync.go
create mode 100644 vendor/golang.org/x/net/http2/timer.go
diff --git a/go.mod b/go.mod
index 3218e0bb388..9bbc20897a6 100644
--- a/go.mod
+++ b/go.mod
@@ -39,7 +39,7 @@ require (
go.uber.org/atomic v1.10.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
- golang.org/x/net v0.25.0
+ golang.org/x/net v0.26.0
golang.org/x/sync v0.7.0
google.golang.org/grpc v1.64.0
google.golang.org/protobuf v1.34.1
@@ -49,16 +49,16 @@ require (
k8s.io/apiserver v0.29.2
k8s.io/client-go v0.29.2
k8s.io/utils v0.0.0-20240102154912-e7106e64919e
- knative.dev/hack v0.0.0-20240529131459-3b6d6441e7ea
- knative.dev/hack/schema v0.0.0-20240529131459-3b6d6441e7ea
- knative.dev/pkg v0.0.0-20240602234151-229e527a1366
- knative.dev/reconciler-test v0.0.0-20240603135328-1f340aa2b068
+ knative.dev/hack v0.0.0-20240607132042-09143140a254
+ knative.dev/hack/schema v0.0.0-20240607132042-09143140a254
+ knative.dev/pkg v0.0.0-20240610120318-15e6cdf2f386
+ knative.dev/reconciler-test v0.0.0-20240607131348-2023469d1158
sigs.k8s.io/yaml v1.4.0
)
require (
cloud.google.com/go v0.114.0 // indirect
- cloud.google.com/go/auth v0.4.2 // indirect
+ cloud.google.com/go/auth v0.5.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
cloud.google.com/go/iam v1.1.8 // indirect
@@ -106,8 +106,8 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
- github.com/prometheus/client_model v0.6.0 // indirect
- github.com/prometheus/common v0.53.0 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.54.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/rickb777/plural v1.2.1 // indirect
@@ -120,19 +120,19 @@ require (
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.uber.org/automaxprocs v1.5.3 // indirect
- golang.org/x/crypto v0.23.0 // indirect
- golang.org/x/mod v0.17.0 // indirect
- golang.org/x/oauth2 v0.20.0 // indirect
- golang.org/x/sys v0.20.0 // indirect
- golang.org/x/term v0.20.0 // indirect
- golang.org/x/text v0.15.0 // indirect
+ golang.org/x/crypto v0.24.0 // indirect
+ golang.org/x/mod v0.18.0 // indirect
+ golang.org/x/oauth2 v0.21.0 // indirect
+ golang.org/x/sys v0.21.0 // indirect
+ golang.org/x/term v0.21.0 // indirect
+ golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.21.0 // indirect
+ golang.org/x/tools v0.22.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/api v0.182.0 // indirect
- google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect
+ google.golang.org/api v0.183.0 // indirect
+ google.golang.org/genproto v0.0.0-20240528184218-531527333157 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 91772bbcc58..db5c1f49c54 100644
--- a/go.sum
+++ b/go.sum
@@ -15,8 +15,8 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY=
cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E=
-cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg=
-cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc=
+cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
+cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@@ -377,16 +377,16 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
-github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
-github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
+github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
+github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@@ -503,8 +503,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
-golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -540,8 +540,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -582,8 +582,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
-golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -591,8 +591,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
-golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -657,15 +657,15 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -676,8 +676,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
-golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -730,8 +730,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
-golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -760,8 +760,8 @@ google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE=
-google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM=
+google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE=
+google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -799,12 +799,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw=
-google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw=
-google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 h1:W5Xj/70xIA4x60O/IFyXivR5MGqblAb8R3w26pnD6No=
-google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE=
+google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e h1:SkdGTrROJl2jRGT/Fxv5QUf9jtdKCQh4KQJXbXVLAi0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e/go.mod h1:LweJcLbyVij6rCex8YunD8DYR5VDonap/jYl3ZRxcIU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -892,14 +892,14 @@ k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/A
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-knative.dev/hack v0.0.0-20240529131459-3b6d6441e7ea h1:iWW6SNMrVd2hI5Y+ltKIEzXVedoQLL86b23dS5fkvXs=
-knative.dev/hack v0.0.0-20240529131459-3b6d6441e7ea/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
-knative.dev/hack/schema v0.0.0-20240529131459-3b6d6441e7ea h1:P9pZuNr2mFeMpBulKNYucvsEmftf0GzC3/XnPcjEN8k=
-knative.dev/hack/schema v0.0.0-20240529131459-3b6d6441e7ea/go.mod h1:3pWwBLnTZSM9psSgCAvhKOHIPTzqfEMlWRpDu6IYhK0=
-knative.dev/pkg v0.0.0-20240602234151-229e527a1366 h1:dUkGourt1Xva2lS+ffO/mpoIPGEOkzOk5ALF3OtEK1U=
-knative.dev/pkg v0.0.0-20240602234151-229e527a1366/go.mod h1:GHFUP1wtD/bR/c02QADqaAK3odDJh1ddBMvXhq/goy8=
-knative.dev/reconciler-test v0.0.0-20240603135328-1f340aa2b068 h1:7oMsNjlOzjUziy45hGwsvSymgd72EniktGTw8I5hHgU=
-knative.dev/reconciler-test v0.0.0-20240603135328-1f340aa2b068/go.mod h1:uxJT+sJfxS+oZiC7PGWX5YFEUiQkRTUPg5YoyJrvNqs=
+knative.dev/hack v0.0.0-20240607132042-09143140a254 h1:1YFnu3U6dWZg0oxm6GU8kEdA9A+BvSWKJO7sg3N0kq8=
+knative.dev/hack v0.0.0-20240607132042-09143140a254/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
+knative.dev/hack/schema v0.0.0-20240607132042-09143140a254 h1:b9hFHGtxx0Kpm4EEjSD72lL0jms91To3OEVBTbqfOYI=
+knative.dev/hack/schema v0.0.0-20240607132042-09143140a254/go.mod h1:3pWwBLnTZSM9psSgCAvhKOHIPTzqfEMlWRpDu6IYhK0=
+knative.dev/pkg v0.0.0-20240610120318-15e6cdf2f386 h1:nxFTT6DrXr70Zi2BK8nc57ts0/smyavd/uBRBbtqg94=
+knative.dev/pkg v0.0.0-20240610120318-15e6cdf2f386/go.mod h1:l7R8/SteYph0mZDsVgq3fVs4mWp1DaYx9BJJX68U6ik=
+knative.dev/reconciler-test v0.0.0-20240607131348-2023469d1158 h1:5yXdPOh4kh20K0/4p2KKWQd2taPZyo4XGH4wojwFaQg=
+knative.dev/reconciler-test v0.0.0-20240607131348-2023469d1158/go.mod h1:vn4ts7F9M2LrU07Sz0OQW3Ci92rv/JQ/4BW3crU7Xb4=
pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw=
pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
index 1e7aa479821..7ef5fc0def9 100644
--- a/vendor/cloud.google.com/go/auth/CHANGES.md
+++ b/vendor/cloud.google.com/go/auth/CHANGES.md
@@ -1,5 +1,19 @@
# Changelog
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31)
+
+
+### Bug Fixes
+
+* **auth:** Pass through client to 2LO and 3LO flows ([#10290](https://github.com/googleapis/google-cloud-go/issues/10290)) ([685784e](https://github.com/googleapis/google-cloud-go/commit/685784ea84358c15e9214bdecb307d37aa3b6d2f))
+
+## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.2...auth/v0.5.0) (2024-05-28)
+
+
+### Features
+
+* **auth:** Adds X509 workload certificate provider ([#10233](https://github.com/googleapis/google-cloud-go/issues/10233)) ([17a9db7](https://github.com/googleapis/google-cloud-go/commit/17a9db73af35e3d1a7a25ac4fd1377a103de6150))
+
## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16)
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
index a66e56d70f8..fe93557389d 100644
--- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go
+++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
@@ -137,6 +137,7 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions)
Scopes: opts.scopes(),
TokenURL: f.TokenURL,
Subject: opts.Subject,
+ Client: opts.client(),
}
if opts2LO.TokenURL == "" {
opts2LO.TokenURL = jwtTokenURL
@@ -154,6 +155,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions)
AuthStyle: auth.StyleInParams,
EarlyTokenExpiry: opts.EarlyTokenRefresh,
RefreshToken: f.RefreshToken,
+ Client: opts.client(),
}
return auth.New3LOTokenProvider(opts3LO)
}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
index 81c956b030b..75bda4c6389 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
@@ -47,7 +47,7 @@ var (
// Options used to configure a [GRPCClientConnPool] from [Dial].
type Options struct {
- // DisableTelemetry disables default telemetry (OpenCensus). An example
+ // DisableTelemetry disables default telemetry (OpenTelemetry). An example
// reason to do so would be to bind custom telemetry that overrides the
// defaults.
DisableTelemetry bool
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
index 7fea9d87e2c..ef09c1b7523 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
@@ -33,7 +33,7 @@ type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, e
// Options used to configure a [net/http.Client] from [NewClient].
type Options struct {
- // DisableTelemetry disables default telemetry (OpenCensus). An example
+ // DisableTelemetry disables default telemetry (OpenTelemetry). An example
// reason to do so would be to bind custom telemetry that overrides the
// defaults.
DisableTelemetry bool
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
new file mode 100644
index 00000000000..ea1e1febbc2
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
@@ -0,0 +1,117 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "io"
+ "os"
+
+ "github.com/googleapis/enterprise-certificate-proxy/client/util"
+)
+
+type certConfigs struct {
+ Workload *workloadSource `json:"workload"`
+}
+
+type workloadSource struct {
+ CertPath string `json:"cert_path"`
+ KeyPath string `json:"key_path"`
+}
+
+type certificateConfig struct {
+ CertConfigs certConfigs `json:"cert_configs"`
+}
+
+// NewWorkloadX509CertProvider creates a certificate source
+// that reads a certificate and private key file from the local file system.
+// This is intended to be used for workload identity federation.
+//
+// The configFilePath points to a config file containing relevant parameters
+// such as the certificate and key file paths.
+// If configFilePath is empty, the client will attempt to load the config from
+// a well-known gcloud location.
+func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) {
+ if configFilePath == "" {
+ envFilePath := util.GetConfigFilePathFromEnv()
+ if envFilePath != "" {
+ configFilePath = envFilePath
+ } else {
+ configFilePath = util.GetDefaultConfigFilePath()
+ }
+ }
+
+ certFile, keyFile, err := getCertAndKeyFiles(configFilePath)
+ if err != nil {
+ return nil, err
+ }
+
+ source := &workloadSource{
+ CertPath: certFile,
+ KeyPath: keyFile,
+ }
+ return source.getClientCertificate, nil
+}
+
+// getClientCertificate attempts to load the certificate and key from the files specified in the
+// certificate config.
+func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ cert, err := tls.LoadX509KeyPair(s.CertPath, s.KeyPath)
+ if err != nil {
+ return nil, err
+ }
+ return &cert, nil
+}
+
+// getCertAndKeyFiles attempts to read the provided config file and return the certificate and private
+// key file paths.
+func getCertAndKeyFiles(configFilePath string) (string, string, error) {
+ jsonFile, err := os.Open(configFilePath)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ return "", "", errSourceUnavailable
+ }
+ return "", "", err
+ }
+
+ byteValue, err := io.ReadAll(jsonFile)
+ if err != nil {
+ return "", "", err
+ }
+
+ var config certificateConfig
+ if err := json.Unmarshal(byteValue, &config); err != nil {
+ return "", "", err
+ }
+
+ if config.CertConfigs.Workload == nil {
+ return "", "", errors.New("no Workload Identity Federation certificate information found in the certificate configuration file")
+ }
+
+ certFile := config.CertConfigs.Workload.CertPath
+ keyFile := config.CertConfigs.Workload.KeyPath
+
+ if certFile == "" {
+ return "", "", errors.New("certificate configuration is missing the certificate file location")
+ }
+
+ if keyFile == "" {
+ return "", "", errors.New("certificate configuration is missing the key file location")
+ }
+
+ return certFile, keyFile, nil
+}
diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go
index 2a364b229b9..cac1a899e9c 100644
--- a/vendor/golang.org/x/mod/module/module.go
+++ b/vendor/golang.org/x/mod/module/module.go
@@ -506,6 +506,7 @@ var badWindowsNames = []string{
"PRN",
"AUX",
"NUL",
+ "COM0",
"COM1",
"COM2",
"COM3",
@@ -515,6 +516,7 @@ var badWindowsNames = []string{
"COM7",
"COM8",
"COM9",
+ "LPT0",
"LPT1",
"LPT2",
"LPT3",
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 6f2df281872..003e649f30c 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -17,6 +17,7 @@ package http2 // import "golang.org/x/net/http2"
import (
"bufio"
+ "context"
"crypto/tls"
"fmt"
"io"
@@ -26,6 +27,7 @@ import (
"strconv"
"strings"
"sync"
+ "time"
"golang.org/x/net/http/httpguts"
)
@@ -210,12 +212,6 @@ type stringWriter interface {
WriteString(s string) (n int, err error)
}
-// A gate lets two goroutines coordinate their activities.
-type gate chan struct{}
-
-func (g gate) Done() { g <- struct{}{} }
-func (g gate) Wait() { <-g }
-
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
type closeWaiter chan struct{}
@@ -383,3 +379,14 @@ func validPseudoPath(v string) bool {
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
type incomparable [0]func()
+
+// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
+// It's defined as an interface here to let us keep synctestGroup entirely test-only
+// and not a part of non-test builds.
+type synctestGroupInterface interface {
+ Join()
+ Now() time.Time
+ NewTimer(d time.Duration) timer
+ AfterFunc(d time.Duration, f func()) timer
+ ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
+}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index c5d08108137..6c349f3ec64 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -154,6 +154,39 @@ type Server struct {
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
+
+ // Synchronization group used for testing.
+ // Outside of tests, this is nil.
+ group synctestGroupInterface
+}
+
+func (s *Server) markNewGoroutine() {
+ if s.group != nil {
+ s.group.Join()
+ }
+}
+
+func (s *Server) now() time.Time {
+ if s.group != nil {
+ return s.group.Now()
+ }
+ return time.Now()
+}
+
+// newTimer creates a new time.Timer, or a synthetic timer in tests.
+func (s *Server) newTimer(d time.Duration) timer {
+ if s.group != nil {
+ return s.group.NewTimer(d)
+ }
+ return timeTimer{time.NewTimer(d)}
+}
+
+// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
+func (s *Server) afterFunc(d time.Duration, f func()) timer {
+ if s.group != nil {
+ return s.group.AfterFunc(d, f)
+ }
+ return timeTimer{time.AfterFunc(d, f)}
}
func (s *Server) initialConnRecvWindowSize() int32 {
@@ -400,6 +433,10 @@ func (o *ServeConnOpts) handler() http.Handler {
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ s.serveConn(c, opts, nil)
+}
+
+func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) {
baseCtx, cancel := serverConnBaseContext(c, opts)
defer cancel()
@@ -426,6 +463,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
pushEnabled: true,
sawClientPreface: opts.SawClientPreface,
}
+ if newf != nil {
+ newf(sc)
+ }
s.state.registerConn(sc)
defer s.state.unregisterConn(sc)
@@ -599,8 +639,8 @@ type serverConn struct {
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
goAwayCode ErrCode
- shutdownTimer *time.Timer // nil until used
- idleTimer *time.Timer // nil if unused
+ shutdownTimer timer // nil until used
+ idleTimer timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -649,12 +689,12 @@ type stream struct {
flow outflow // limits writing from Handler to client
inflow inflow // what the client is allowed to POST/etc to us
state streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- readDeadline *time.Timer // nil if unused
- writeDeadline *time.Timer // nil if unused
- closeErr error // set before cw is closed
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline timer // nil if unused
+ writeDeadline timer // nil if unused
+ closeErr error // set before cw is closed
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -811,8 +851,9 @@ type readFrameResult struct {
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
- gate := make(gate)
- gateDone := gate.Done
+ sc.srv.markNewGoroutine()
+ gate := make(chan struct{})
+ gateDone := func() { gate <- struct{}{} }
for {
f, err := sc.framer.ReadFrame()
select {
@@ -843,6 +884,7 @@ type frameWriteResult struct {
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
+ sc.srv.markNewGoroutine()
var err error
if wd == nil {
err = wr.write.writeFrame(sc)
@@ -922,13 +964,13 @@ func (sc *serverConn) serve() {
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout > 0 {
- sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above
- settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
loopNum := 0
@@ -1057,10 +1099,10 @@ func (sc *serverConn) readPreface() error {
errc <- nil
}
}()
- timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
+ timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
- case <-timer.C:
+ case <-timer.C():
return errPrefaceTimeout
case err := <-errc:
if err == nil {
@@ -1425,7 +1467,7 @@ func (sc *serverConn) goAway(code ErrCode) {
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
+ sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
@@ -1639,7 +1681,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
delete(sc.streams, st.id)
if len(sc.streams) == 0 {
sc.setConnState(http.StateIdle)
- if sc.srv.IdleTimeout > 0 {
+ if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil {
sc.idleTimer.Reset(sc.srv.IdleTimeout)
}
if h1ServerKeepAlivesDisabled(sc.hs) {
@@ -1661,6 +1703,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
}
}
st.closeErr = err
+ st.cancelCtx()
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
sc.writeSched.CloseStream(st.id)
}
@@ -2021,7 +2064,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
- st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
return sc.scheduleHandler(id, rw, req, handler)
@@ -2119,7 +2162,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.srv.initialStreamRecvWindowSize())
if sc.hs.WriteTimeout > 0 {
- st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
@@ -2343,6 +2386,7 @@ func (sc *serverConn) handlerDone() {
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+ sc.srv.markNewGoroutine()
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
@@ -2639,7 +2683,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
- date = time.Now().UTC().Format(http.TimeFormat)
+ date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
@@ -2761,7 +2805,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(time.Now()) {
+ if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onReadTimeout()
@@ -2777,9 +2821,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.readDeadline = nil
} else if st.readDeadline == nil {
- st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
+ st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
} else {
- st.readDeadline.Reset(deadline.Sub(time.Now()))
+ st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
}
})
return nil
@@ -2787,7 +2831,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(time.Now()) {
+ if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onWriteTimeout()
@@ -2803,9 +2847,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.writeDeadline = nil
} else if st.writeDeadline == nil {
- st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
+ st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
} else {
- st.writeDeadline.Reset(deadline.Sub(time.Now()))
+ st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
}
})
return nil
diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go
deleted file mode 100644
index 61075bd16d3..00000000000
--- a/vendor/golang.org/x/net/http2/testsync.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package http2
-
-import (
- "context"
- "sync"
- "time"
-)
-
-// testSyncHooks coordinates goroutines in tests.
-//
-// For example, a call to ClientConn.RoundTrip involves several goroutines, including:
-// - the goroutine running RoundTrip;
-// - the clientStream.doRequest goroutine, which writes the request; and
-// - the clientStream.readLoop goroutine, which reads the response.
-//
-// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines
-// are blocked waiting for some condition such as reading the Request.Body or waiting for
-// flow control to become available.
-//
-// The testSyncHooks also manage timers and synthetic time in tests.
-// This permits us to, for example, start a request and cause it to time out waiting for
-// response headers without resorting to time.Sleep calls.
-type testSyncHooks struct {
- // active/inactive act as a mutex and condition variable.
- //
- // - neither chan contains a value: testSyncHooks is locked.
- // - active contains a value: unlocked, and at least one goroutine is not blocked
- // - inactive contains a value: unlocked, and all goroutines are blocked
- active chan struct{}
- inactive chan struct{}
-
- // goroutine counts
- total int // total goroutines
- condwait map[*sync.Cond]int // blocked in sync.Cond.Wait
- blocked []*testBlockedGoroutine // otherwise blocked
-
- // fake time
- now time.Time
- timers []*fakeTimer
-
- // Transport testing: Report various events.
- newclientconn func(*ClientConn)
- newstream func(*clientStream)
-}
-
-// testBlockedGoroutine is a blocked goroutine.
-type testBlockedGoroutine struct {
- f func() bool // blocked until f returns true
- ch chan struct{} // closed when unblocked
-}
-
-func newTestSyncHooks() *testSyncHooks {
- h := &testSyncHooks{
- active: make(chan struct{}, 1),
- inactive: make(chan struct{}, 1),
- condwait: map[*sync.Cond]int{},
- }
- h.inactive <- struct{}{}
- h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
- return h
-}
-
-// lock acquires the testSyncHooks mutex.
-func (h *testSyncHooks) lock() {
- select {
- case <-h.active:
- case <-h.inactive:
- }
-}
-
-// waitInactive waits for all goroutines to become inactive.
-func (h *testSyncHooks) waitInactive() {
- for {
- <-h.inactive
- if !h.unlock() {
- break
- }
- }
-}
-
-// unlock releases the testSyncHooks mutex.
-// It reports whether any goroutines are active.
-func (h *testSyncHooks) unlock() (active bool) {
- // Look for a blocked goroutine which can be unblocked.
- blocked := h.blocked[:0]
- unblocked := false
- for _, b := range h.blocked {
- if !unblocked && b.f() {
- unblocked = true
- close(b.ch)
- } else {
- blocked = append(blocked, b)
- }
- }
- h.blocked = blocked
-
- // Count goroutines blocked on condition variables.
- condwait := 0
- for _, count := range h.condwait {
- condwait += count
- }
-
- if h.total > condwait+len(blocked) {
- h.active <- struct{}{}
- return true
- } else {
- h.inactive <- struct{}{}
- return false
- }
-}
-
-// goRun starts a new goroutine.
-func (h *testSyncHooks) goRun(f func()) {
- h.lock()
- h.total++
- h.unlock()
- go func() {
- defer func() {
- h.lock()
- h.total--
- h.unlock()
- }()
- f()
- }()
-}
-
-// blockUntil indicates that a goroutine is blocked waiting for some condition to become true.
-// It waits until f returns true before proceeding.
-//
-// Example usage:
-//
-// h.blockUntil(func() bool {
-// // Is the context done yet?
-// select {
-// case <-ctx.Done():
-// default:
-// return false
-// }
-// return true
-// })
-// // Wait for the context to become done.
-// <-ctx.Done()
-//
-// The function f passed to blockUntil must be non-blocking and idempotent.
-func (h *testSyncHooks) blockUntil(f func() bool) {
- if f() {
- return
- }
- ch := make(chan struct{})
- h.lock()
- h.blocked = append(h.blocked, &testBlockedGoroutine{
- f: f,
- ch: ch,
- })
- h.unlock()
- <-ch
-}
-
-// broadcast is sync.Cond.Broadcast.
-func (h *testSyncHooks) condBroadcast(cond *sync.Cond) {
- h.lock()
- delete(h.condwait, cond)
- h.unlock()
- cond.Broadcast()
-}
-
-// broadcast is sync.Cond.Wait.
-func (h *testSyncHooks) condWait(cond *sync.Cond) {
- h.lock()
- h.condwait[cond]++
- h.unlock()
-}
-
-// newTimer creates a new fake timer.
-func (h *testSyncHooks) newTimer(d time.Duration) timer {
- h.lock()
- defer h.unlock()
- t := &fakeTimer{
- hooks: h,
- when: h.now.Add(d),
- c: make(chan time.Time),
- }
- h.timers = append(h.timers, t)
- return t
-}
-
-// afterFunc creates a new fake AfterFunc timer.
-func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer {
- h.lock()
- defer h.unlock()
- t := &fakeTimer{
- hooks: h,
- when: h.now.Add(d),
- f: f,
- }
- h.timers = append(h.timers, t)
- return t
-}
-
-func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
- ctx, cancel := context.WithCancel(ctx)
- t := h.afterFunc(d, cancel)
- return ctx, func() {
- t.Stop()
- cancel()
- }
-}
-
-func (h *testSyncHooks) timeUntilEvent() time.Duration {
- h.lock()
- defer h.unlock()
- var next time.Time
- for _, t := range h.timers {
- if next.IsZero() || t.when.Before(next) {
- next = t.when
- }
- }
- if d := next.Sub(h.now); d > 0 {
- return d
- }
- return 0
-}
-
-// advance advances time and causes synthetic timers to fire.
-func (h *testSyncHooks) advance(d time.Duration) {
- h.lock()
- defer h.unlock()
- h.now = h.now.Add(d)
- timers := h.timers[:0]
- for _, t := range h.timers {
- t := t // remove after go.mod depends on go1.22
- t.mu.Lock()
- switch {
- case t.when.After(h.now):
- timers = append(timers, t)
- case t.when.IsZero():
- // stopped timer
- default:
- t.when = time.Time{}
- if t.c != nil {
- close(t.c)
- }
- if t.f != nil {
- h.total++
- go func() {
- defer func() {
- h.lock()
- h.total--
- h.unlock()
- }()
- t.f()
- }()
- }
- }
- t.mu.Unlock()
- }
- h.timers = timers
-}
-
-// A timer wraps a time.Timer, or a synthetic equivalent in tests.
-// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires.
-type timer interface {
- C() <-chan time.Time
- Stop() bool
- Reset(d time.Duration) bool
-}
-
-// timeTimer implements timer using real time.
-type timeTimer struct {
- t *time.Timer
- c chan time.Time
-}
-
-// newTimeTimer creates a new timer using real time.
-func newTimeTimer(d time.Duration) timer {
- ch := make(chan time.Time)
- t := time.AfterFunc(d, func() {
- close(ch)
- })
- return &timeTimer{t, ch}
-}
-
-// newTimeAfterFunc creates an AfterFunc timer using real time.
-func newTimeAfterFunc(d time.Duration, f func()) timer {
- return &timeTimer{
- t: time.AfterFunc(d, f),
- }
-}
-
-func (t timeTimer) C() <-chan time.Time { return t.c }
-func (t timeTimer) Stop() bool { return t.t.Stop() }
-func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) }
-
-// fakeTimer implements timer using fake time.
-type fakeTimer struct {
- hooks *testSyncHooks
-
- mu sync.Mutex
- when time.Time // when the timer will fire
- c chan time.Time // closed when the timer fires; mutually exclusive with f
- f func() // called when the timer fires; mutually exclusive with c
-}
-
-func (t *fakeTimer) C() <-chan time.Time { return t.c }
-
-func (t *fakeTimer) Stop() bool {
- t.mu.Lock()
- defer t.mu.Unlock()
- stopped := t.when.IsZero()
- t.when = time.Time{}
- return stopped
-}
-
-func (t *fakeTimer) Reset(d time.Duration) bool {
- if t.c != nil || t.f == nil {
- panic("fakeTimer only supports Reset on AfterFunc timers")
- }
- t.mu.Lock()
- defer t.mu.Unlock()
- t.hooks.lock()
- defer t.hooks.unlock()
- active := !t.when.IsZero()
- t.when = t.hooks.now.Add(d)
- if !active {
- t.hooks.timers = append(t.hooks.timers, t)
- }
- return active
-}
diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go
new file mode 100644
index 00000000000..0b1c17b8129
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/timer.go
@@ -0,0 +1,20 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package http2
+
+import "time"
+
+// A timer is a time.Timer, as an interface which can be replaced in tests.
+type timer = interface {
+ C() <-chan time.Time
+ Reset(d time.Duration) bool
+ Stop() bool
+}
+
+// timeTimer adapts a time.Timer to the timer interface.
+type timeTimer struct {
+ *time.Timer
+}
+
+func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 2fa49490c9d..98a49c6b6ee 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -185,7 +185,45 @@ type Transport struct {
connPoolOnce sync.Once
connPoolOrDef ClientConnPool // non-nil version of ConnPool
- syncHooks *testSyncHooks
+ *transportTestHooks
+}
+
+// Hook points used for testing.
+// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations.
+// Inside tests, see the testSyncHooks function docs.
+
+type transportTestHooks struct {
+ newclientconn func(*ClientConn)
+ group synctestGroupInterface
+}
+
+func (t *Transport) markNewGoroutine() {
+ if t != nil && t.transportTestHooks != nil {
+ t.transportTestHooks.group.Join()
+ }
+}
+
+// newTimer creates a new time.Timer, or a synthetic timer in tests.
+func (t *Transport) newTimer(d time.Duration) timer {
+ if t.transportTestHooks != nil {
+ return t.transportTestHooks.group.NewTimer(d)
+ }
+ return timeTimer{time.NewTimer(d)}
+}
+
+// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
+func (t *Transport) afterFunc(d time.Duration, f func()) timer {
+ if t.transportTestHooks != nil {
+ return t.transportTestHooks.group.AfterFunc(d, f)
+ }
+ return timeTimer{time.AfterFunc(d, f)}
+}
+
+func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+ if t.transportTestHooks != nil {
+ return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
+ }
+ return context.WithTimeout(ctx, d)
}
func (t *Transport) maxHeaderListSize() uint32 {
@@ -352,60 +390,6 @@ type ClientConn struct {
werr error // first write error that has occurred
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
-
- syncHooks *testSyncHooks // can be nil
-}
-
-// Hook points used for testing.
-// Outside of tests, cc.syncHooks is nil and these all have minimal implementations.
-// Inside tests, see the testSyncHooks function docs.
-
-// goRun starts a new goroutine.
-func (cc *ClientConn) goRun(f func()) {
- if cc.syncHooks != nil {
- cc.syncHooks.goRun(f)
- return
- }
- go f()
-}
-
-// condBroadcast is cc.cond.Broadcast.
-func (cc *ClientConn) condBroadcast() {
- if cc.syncHooks != nil {
- cc.syncHooks.condBroadcast(cc.cond)
- }
- cc.cond.Broadcast()
-}
-
-// condWait is cc.cond.Wait.
-func (cc *ClientConn) condWait() {
- if cc.syncHooks != nil {
- cc.syncHooks.condWait(cc.cond)
- }
- cc.cond.Wait()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (cc *ClientConn) newTimer(d time.Duration) timer {
- if cc.syncHooks != nil {
- return cc.syncHooks.newTimer(d)
- }
- return newTimeTimer(d)
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer {
- if cc.syncHooks != nil {
- return cc.syncHooks.afterFunc(d, f)
- }
- return newTimeAfterFunc(d, f)
-}
-
-func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
- if cc.syncHooks != nil {
- return cc.syncHooks.contextWithTimeout(ctx, d)
- }
- return context.WithTimeout(ctx, d)
}
// clientStream is the state for a single HTTP/2 stream. One of these
@@ -487,7 +471,7 @@ func (cs *clientStream) abortStreamLocked(err error) {
// TODO(dneil): Clean up tests where cs.cc.cond is nil.
if cs.cc.cond != nil {
// Wake up writeRequestBody if it is waiting on flow control.
- cs.cc.condBroadcast()
+ cs.cc.cond.Broadcast()
}
}
@@ -497,7 +481,7 @@ func (cs *clientStream) abortRequestBodyWrite() {
defer cc.mu.Unlock()
if cs.reqBody != nil && cs.reqBodyClosed == nil {
cs.closeReqBodyLocked()
- cc.condBroadcast()
+ cc.cond.Broadcast()
}
}
@@ -507,10 +491,11 @@ func (cs *clientStream) closeReqBodyLocked() {
}
cs.reqBodyClosed = make(chan struct{})
reqBodyClosed := cs.reqBodyClosed
- cs.cc.goRun(func() {
+ go func() {
+ cs.cc.t.markNewGoroutine()
cs.reqBody.Close()
close(reqBodyClosed)
- })
+ }()
}
type stickyErrWriter struct {
@@ -626,21 +611,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
d := time.Second * time.Duration(backoff)
- var tm timer
- if t.syncHooks != nil {
- tm = t.syncHooks.newTimer(d)
- t.syncHooks.blockUntil(func() bool {
- select {
- case <-tm.C():
- case <-req.Context().Done():
- default:
- return false
- }
- return true
- })
- } else {
- tm = newTimeTimer(d)
- }
+ tm := t.newTimer(d)
select {
case <-tm.C():
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
@@ -725,8 +696,8 @@ func canRetryError(err error) bool {
}
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
- if t.syncHooks != nil {
- return t.newClientConn(nil, singleUse, t.syncHooks)
+ if t.transportTestHooks != nil {
+ return t.newClientConn(nil, singleUse)
}
host, _, err := net.SplitHostPort(addr)
if err != nil {
@@ -736,7 +707,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
if err != nil {
return nil, err
}
- return t.newClientConn(tconn, singleUse, nil)
+ return t.newClientConn(tconn, singleUse)
}
func (t *Transport) newTLSConfig(host string) *tls.Config {
@@ -802,10 +773,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 {
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
- return t.newClientConn(c, t.disableKeepAlives(), nil)
+ return t.newClientConn(c, t.disableKeepAlives())
}
-func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) {
+func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
cc := &ClientConn{
t: t,
tconn: c,
@@ -820,16 +791,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo
wantSettingsAck: true,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
- syncHooks: hooks,
}
- if hooks != nil {
- hooks.newclientconn(cc)
+ if t.transportTestHooks != nil {
+ t.markNewGoroutine()
+ t.transportTestHooks.newclientconn(cc)
c = cc.tconn
}
- if d := t.idleConnTimeout(); d != 0 {
- cc.idleTimeout = d
- cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout)
- }
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
}
@@ -893,7 +860,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo
return nil, cc.werr
}
- cc.goRun(cc.readLoop)
+ // Start the idle timer after the connection is fully initialized.
+ if d := t.idleConnTimeout(); d != 0 {
+ cc.idleTimeout = d
+ cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+ }
+
+ go cc.readLoop()
return cc, nil
}
@@ -901,7 +874,7 @@ func (cc *ClientConn) healthCheck() {
pingTimeout := cc.t.pingTimeout()
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
- ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout)
+ ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
defer cancel()
cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx)
@@ -1144,7 +1117,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
// Wait for all in-flight streams to complete or connection to close
done := make(chan struct{})
cancelled := false // guarded by cc.mu
- cc.goRun(func() {
+ go func() {
+ cc.t.markNewGoroutine()
cc.mu.Lock()
defer cc.mu.Unlock()
for {
@@ -1156,9 +1130,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
if cancelled {
break
}
- cc.condWait()
+ cc.cond.Wait()
}
- })
+ }()
shutdownEnterWaitStateHook()
select {
case <-done:
@@ -1168,7 +1142,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
cc.mu.Lock()
// Free the goroutine above
cancelled = true
- cc.condBroadcast()
+ cc.cond.Broadcast()
cc.mu.Unlock()
return ctx.Err()
}
@@ -1206,7 +1180,7 @@ func (cc *ClientConn) closeForError(err error) {
for _, cs := range cc.streams {
cs.abortStreamLocked(err)
}
- cc.condBroadcast()
+ cc.cond.Broadcast()
cc.mu.Unlock()
cc.closeConn()
}
@@ -1321,23 +1295,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
respHeaderRecv: make(chan struct{}),
donec: make(chan struct{}),
}
- cc.goRun(func() {
- cs.doRequest(req)
- })
+
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ if !cc.t.disableCompression() &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ !cs.isHead {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: https://zlib.net/zlib_faq.html#faq39
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ cs.requestedGzip = true
+ }
+
+ go cs.doRequest(req, streamf)
waitDone := func() error {
- if cc.syncHooks != nil {
- cc.syncHooks.blockUntil(func() bool {
- select {
- case <-cs.donec:
- case <-ctx.Done():
- case <-cs.reqCancel:
- default:
- return false
- }
- return true
- })
- }
select {
case <-cs.donec:
return nil
@@ -1398,24 +1379,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
return err
}
- if streamf != nil {
- streamf(cs)
- }
-
for {
- if cc.syncHooks != nil {
- cc.syncHooks.blockUntil(func() bool {
- select {
- case <-cs.respHeaderRecv:
- case <-cs.abort:
- case <-ctx.Done():
- case <-cs.reqCancel:
- default:
- return false
- }
- return true
- })
- }
select {
case <-cs.respHeaderRecv:
return handleResponseHeaders()
@@ -1445,8 +1409,9 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
// doRequest runs for the duration of the request lifetime.
//
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
-func (cs *clientStream) doRequest(req *http.Request) {
- err := cs.writeRequest(req)
+func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
+ cs.cc.t.markNewGoroutine()
+ err := cs.writeRequest(req, streamf)
cs.cleanupWriteRequest(err)
}
@@ -1457,7 +1422,7 @@ func (cs *clientStream) doRequest(req *http.Request) {
//
// It returns non-nil if the request ends otherwise.
// If the returned error is StreamError, the error Code may be used in resetting the stream.
-func (cs *clientStream) writeRequest(req *http.Request) (err error) {
+func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) {
cc := cs.cc
ctx := cs.ctx
@@ -1471,21 +1436,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
if cc.reqHeaderMu == nil {
panic("RoundTrip on uninitialized ClientConn") // for tests
}
- var newStreamHook func(*clientStream)
- if cc.syncHooks != nil {
- newStreamHook = cc.syncHooks.newstream
- cc.syncHooks.blockUntil(func() bool {
- select {
- case cc.reqHeaderMu <- struct{}{}:
- <-cc.reqHeaderMu
- case <-cs.reqCancel:
- case <-ctx.Done():
- default:
- return false
- }
- return true
- })
- }
select {
case cc.reqHeaderMu <- struct{}{}:
case <-cs.reqCancel:
@@ -1510,28 +1460,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
}
cc.mu.Unlock()
- if newStreamHook != nil {
- newStreamHook(cs)
- }
-
- // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
- if !cc.t.disableCompression() &&
- req.Header.Get("Accept-Encoding") == "" &&
- req.Header.Get("Range") == "" &&
- !cs.isHead {
- // Request gzip only, not deflate. Deflate is ambiguous and
- // not as universally supported anyway.
- // See: https://zlib.net/zlib_faq.html#faq39
- //
- // Note that we don't request this for HEAD requests,
- // due to a bug in nginx:
- // http://trac.nginx.org/nginx/ticket/358
- // https://golang.org/issue/5522
- //
- // We don't request gzip if the request is for a range, since
- // auto-decoding a portion of a gzipped document will just fail
- // anyway. See https://golang.org/issue/8923
- cs.requestedGzip = true
+ if streamf != nil {
+ streamf(cs)
}
continueTimeout := cc.t.expectContinueTimeout()
@@ -1594,7 +1524,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
var respHeaderTimer <-chan time.Time
var respHeaderRecv chan struct{}
if d := cc.responseHeaderTimeout(); d != 0 {
- timer := cc.newTimer(d)
+ timer := cc.t.newTimer(d)
defer timer.Stop()
respHeaderTimer = timer.C()
respHeaderRecv = cs.respHeaderRecv
@@ -1603,21 +1533,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
// or until the request is aborted (via context, error, or otherwise),
// whichever comes first.
for {
- if cc.syncHooks != nil {
- cc.syncHooks.blockUntil(func() bool {
- select {
- case <-cs.peerClosed:
- case <-respHeaderTimer:
- case <-respHeaderRecv:
- case <-cs.abort:
- case <-ctx.Done():
- case <-cs.reqCancel:
- default:
- return false
- }
- return true
- })
- }
select {
case <-cs.peerClosed:
return nil
@@ -1766,7 +1681,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
return nil
}
cc.pendingRequests++
- cc.condWait()
+ cc.cond.Wait()
cc.pendingRequests--
select {
case <-cs.abort:
@@ -2028,7 +1943,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
cs.flow.take(take)
return take, nil
}
- cc.condWait()
+ cc.cond.Wait()
}
}
@@ -2311,7 +2226,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
- cc.condBroadcast()
+ cc.cond.Broadcast()
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
@@ -2333,6 +2248,7 @@ type clientConnReadLoop struct {
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
+ cc.t.markNewGoroutine()
rl := &clientConnReadLoop{cc: cc}
defer rl.cleanup()
cc.readerErr = rl.run()
@@ -2399,7 +2315,7 @@ func (rl *clientConnReadLoop) cleanup() {
cs.abortStreamLocked(err)
}
}
- cc.condBroadcast()
+ cc.cond.Broadcast()
cc.mu.Unlock()
}
@@ -2436,7 +2352,7 @@ func (rl *clientConnReadLoop) run() error {
readIdleTimeout := cc.t.ReadIdleTimeout
var t timer
if readIdleTimeout != 0 {
- t = cc.afterFunc(readIdleTimeout, cc.healthCheck)
+ t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
}
for {
f, err := cc.fr.ReadFrame()
@@ -3034,7 +2950,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
for _, cs := range cc.streams {
cs.flow.add(delta)
}
- cc.condBroadcast()
+ cc.cond.Broadcast()
cc.initialWindowSize = s.Val
case SettingHeaderTableSize:
@@ -3089,7 +3005,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
return ConnectionError(ErrCodeFlowControl)
}
- cc.condBroadcast()
+ cc.cond.Broadcast()
return nil
}
@@ -3133,7 +3049,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
}
var pingError error
errc := make(chan struct{})
- cc.goRun(func() {
+ go func() {
+ cc.t.markNewGoroutine()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if pingError = cc.fr.WritePing(false, p); pingError != nil {
@@ -3144,20 +3061,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
close(errc)
return
}
- })
- if cc.syncHooks != nil {
- cc.syncHooks.blockUntil(func() bool {
- select {
- case <-c:
- case <-errc:
- case <-ctx.Done():
- case <-cc.readerDone:
- default:
- return false
- }
- return true
- })
- }
+ }()
select {
case <-c:
return nil
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
index 0a242c669e2..f6783339d11 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
}
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
- for k := n.kids; k != nil; k = k.next {
- k.setParent(n.parent)
+ for n.kids != nil {
+ n.kids.setParent(n.parent)
}
n.setParent(nil)
delete(ws.nodes, n.id)
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
index 573fe79e86e..d7d4b8b6e35 100644
--- a/vendor/golang.org/x/net/proxy/per_host.go
+++ b/vendor/golang.org/x/net/proxy/per_host.go
@@ -137,9 +137,7 @@ func (p *PerHost) AddNetwork(net *net.IPNet) {
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
// "example.com" matches "example.com" and all of its subdomains.
func (p *PerHost) AddZone(zone string) {
- if strings.HasSuffix(zone, ".") {
- zone = zone[:len(zone)-1]
- }
+ zone = strings.TrimSuffix(zone, ".")
if !strings.HasPrefix(zone, ".") {
zone = "." + zone
}
@@ -148,8 +146,6 @@ func (p *PerHost) AddZone(zone string) {
// AddHost specifies a host name that will use the bypass proxy.
func (p *PerHost) AddHost(host string) {
- if strings.HasSuffix(host, ".") {
- host = host[:len(host)-1]
- }
+ host = strings.TrimSuffix(host, ".")
p.bypassHosts = append(p.bypassHosts, host)
}
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
index ba931c2c3de..7b82e7a0837 100644
--- a/vendor/golang.org/x/oauth2/google/google.go
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -252,7 +252,10 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar
// Further information about retrieving access tokens from the GCE metadata
// server can be found at https://cloud.google.com/compute/docs/authentication.
func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource {
- return computeTokenSource(account, 0, scope...)
+ // refresh 3 minutes and 45 seconds early. The shortest MDS cache is currently 4 minutes, so any
+ // refreshes earlier are a waste of compute.
+ earlyExpirySecs := 225 * time.Second
+ return computeTokenSource(account, earlyExpirySecs, scope...)
}
func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource {
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index fdcaa974d23..4ed2e488b61 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -263,6 +263,7 @@ struct ltchars {
#include
#include
#include
+#include
#include
#include
#include
@@ -549,6 +550,7 @@ ccflags="$@"
$2 !~ "NLA_TYPE_MASK" &&
$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
+ $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ ||
$2 ~ /^FIORDCHK$/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 93a38a97d9c..877a62b479a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -502,6 +502,7 @@ const (
BPF_IMM = 0x0
BPF_IND = 0x40
BPF_JA = 0x0
+ BPF_JCOND = 0xe0
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
@@ -657,6 +658,9 @@ const (
CAN_NPROTO = 0x8
CAN_RAW = 0x1
CAN_RAW_FILTER_MAX = 0x200
+ CAN_RAW_XL_VCID_RX_FILTER = 0x4
+ CAN_RAW_XL_VCID_TX_PASS = 0x2
+ CAN_RAW_XL_VCID_TX_SET = 0x1
CAN_RTR_FLAG = 0x40000000
CAN_SFF_ID_BITS = 0xb
CAN_SFF_MASK = 0x7ff
@@ -1339,6 +1343,7 @@ const (
F_OFD_SETLK = 0x25
F_OFD_SETLKW = 0x26
F_OK = 0x0
+ F_SEAL_EXEC = 0x20
F_SEAL_FUTURE_WRITE = 0x10
F_SEAL_GROW = 0x4
F_SEAL_SEAL = 0x1
@@ -1627,6 +1632,7 @@ const (
IP_FREEBIND = 0xf
IP_HDRINCL = 0x3
IP_IPSEC_POLICY = 0x10
+ IP_LOCAL_PORT_RANGE = 0x33
IP_MAXPACKET = 0xffff
IP_MAX_MEMBERSHIPS = 0x14
IP_MF = 0x2000
@@ -1653,6 +1659,7 @@ const (
IP_PMTUDISC_OMIT = 0x5
IP_PMTUDISC_PROBE = 0x3
IP_PMTUDISC_WANT = 0x1
+ IP_PROTOCOL = 0x34
IP_RECVERR = 0xb
IP_RECVERR_RFC4884 = 0x1a
IP_RECVFRAGSIZE = 0x19
@@ -2169,7 +2176,7 @@ const (
NFT_SECMARK_CTX_MAXLEN = 0x100
NFT_SET_MAXNAMELEN = 0x100
NFT_SOCKET_MAX = 0x3
- NFT_TABLE_F_MASK = 0x3
+ NFT_TABLE_F_MASK = 0x7
NFT_TABLE_MAXNAMELEN = 0x100
NFT_TRACETYPE_MAX = 0x3
NFT_TUNNEL_F_MASK = 0x7
@@ -2403,6 +2410,7 @@ const (
PERF_RECORD_MISC_USER = 0x2
PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
+ PID_FS_MAGIC = 0x50494446
PIPEFS_MAGIC = 0x50495045
PPPIOCGNPMODE = 0xc008744c
PPPIOCNEWUNIT = 0xc004743e
@@ -2896,8 +2904,9 @@ const (
RWF_APPEND = 0x10
RWF_DSYNC = 0x2
RWF_HIPRI = 0x1
+ RWF_NOAPPEND = 0x20
RWF_NOWAIT = 0x8
- RWF_SUPPORTED = 0x1f
+ RWF_SUPPORTED = 0x3f
RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3
@@ -2918,7 +2927,9 @@ const (
SCHED_RESET_ON_FORK = 0x40000000
SCHED_RR = 0x2
SCM_CREDENTIALS = 0x2
+ SCM_PIDFD = 0x4
SCM_RIGHTS = 0x1
+ SCM_SECURITY = 0x3
SCM_TIMESTAMP = 0x1d
SC_LOG_FLUSH = 0x100000
SECCOMP_ADDFD_FLAG_SEND = 0x2
@@ -3051,6 +3062,8 @@ const (
SIOCSMIIREG = 0x8949
SIOCSRARP = 0x8962
SIOCWANDEV = 0x894a
+ SK_DIAG_BPF_STORAGE_MAX = 0x3
+ SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1
SMACK_MAGIC = 0x43415d53
SMART_AUTOSAVE = 0xd2
SMART_AUTO_OFFLINE = 0xdb
@@ -3071,6 +3084,8 @@ const (
SOCKFS_MAGIC = 0x534f434b
SOCK_BUF_LOCK_MASK = 0x3
SOCK_DCCP = 0x6
+ SOCK_DESTROY = 0x15
+ SOCK_DIAG_BY_FAMILY = 0x14
SOCK_IOC_TYPE = 0x89
SOCK_PACKET = 0xa
SOCK_RAW = 0x3
@@ -3260,6 +3275,7 @@ const (
TCP_MAX_WINSHIFT = 0xe
TCP_MD5SIG = 0xe
TCP_MD5SIG_EXT = 0x20
+ TCP_MD5SIG_FLAG_IFINDEX = 0x2
TCP_MD5SIG_FLAG_PREFIX = 0x1
TCP_MD5SIG_MAXKEYLEN = 0x50
TCP_MSS = 0x200
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 42ff8c3c1b0..e4bc0bd57c7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -118,6 +118,7 @@ const (
IXOFF = 0x1000
IXON = 0x400
MAP_32BIT = 0x40
+ MAP_ABOVE4G = 0x80
MAP_ANON = 0x20
MAP_ANONYMOUS = 0x20
MAP_DENYWRITE = 0x800
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index dca436004fa..689317afdbf 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -118,6 +118,7 @@ const (
IXOFF = 0x1000
IXON = 0x400
MAP_32BIT = 0x40
+ MAP_ABOVE4G = 0x80
MAP_ANON = 0x20
MAP_ANONYMOUS = 0x20
MAP_DENYWRITE = 0x800
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index d8cae6d1534..14270508b04 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -87,6 +87,7 @@ const (
FICLONE = 0x40049409
FICLONERANGE = 0x4020940d
FLUSHO = 0x1000
+ FPMR_MAGIC = 0x46504d52
FPSIMD_MAGIC = 0x46508001
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 0036746ea19..4740b834854 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -4605,7 +4605,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x149
+ NL80211_ATTR_MAX = 0x14a
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@@ -5209,7 +5209,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
- NL80211_FREQUENCY_ATTR_MAX = 0x1f
+ NL80211_FREQUENCY_ATTR_MAX = 0x20
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
@@ -5703,7 +5703,7 @@ const (
NL80211_STA_FLAG_ASSOCIATED = 0x7
NL80211_STA_FLAG_AUTHENTICATED = 0x5
NL80211_STA_FLAG_AUTHORIZED = 0x1
- NL80211_STA_FLAG_MAX = 0x7
+ NL80211_STA_FLAG_MAX = 0x8
NL80211_STA_FLAG_MAX_OLD_API = 0x6
NL80211_STA_FLAG_MFP = 0x4
NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2
@@ -6001,3 +6001,34 @@ type CachestatRange struct {
Off uint64
Len uint64
}
+
+const (
+ SK_MEMINFO_RMEM_ALLOC = 0x0
+ SK_MEMINFO_RCVBUF = 0x1
+ SK_MEMINFO_WMEM_ALLOC = 0x2
+ SK_MEMINFO_SNDBUF = 0x3
+ SK_MEMINFO_FWD_ALLOC = 0x4
+ SK_MEMINFO_WMEM_QUEUED = 0x5
+ SK_MEMINFO_OPTMEM = 0x6
+ SK_MEMINFO_BACKLOG = 0x7
+ SK_MEMINFO_DROPS = 0x8
+ SK_MEMINFO_VARS = 0x9
+ SKNLGRP_NONE = 0x0
+ SKNLGRP_INET_TCP_DESTROY = 0x1
+ SKNLGRP_INET_UDP_DESTROY = 0x2
+ SKNLGRP_INET6_TCP_DESTROY = 0x3
+ SKNLGRP_INET6_UDP_DESTROY = 0x4
+ SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0
+ SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1
+ SK_DIAG_BPF_STORAGE_REP_NONE = 0x0
+ SK_DIAG_BPF_STORAGE = 0x1
+ SK_DIAG_BPF_STORAGE_NONE = 0x0
+ SK_DIAG_BPF_STORAGE_PAD = 0x1
+ SK_DIAG_BPF_STORAGE_MAP_ID = 0x2
+ SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3
+)
+
+type SockDiagReq struct {
+ Family uint8
+ Protocol uint8
+}
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
index 26be94a8a7b..6f7d2ac70a9 100644
--- a/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -68,6 +68,7 @@ type UserInfo10 struct {
//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo
//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation
//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree
+//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum
const (
// do not reorder
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 5c6035ddfa9..9f73df75b5f 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -401,6 +401,7 @@ var (
procTransmitFile = modmswsock.NewProc("TransmitFile")
procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree")
procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation")
+ procNetUserEnum = modnetapi32.NewProc("NetUserEnum")
procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo")
procNtCreateFile = modntdll.NewProc("NtCreateFile")
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
@@ -3486,6 +3487,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete
return
}
+func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
+ r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0)
+ if r0 != 0 {
+ neterr = syscall.Errno(r0)
+ }
+ return
+}
+
func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
if r0 != 0 {
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index eb7a8282f9e..af0ee6c614d 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -8,12 +8,14 @@ package gocommand
import (
"bytes"
"context"
+ "encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
+ "path/filepath"
"reflect"
"regexp"
"runtime"
@@ -167,7 +169,9 @@ type Invocation struct {
// TODO(rfindley): remove, in favor of Args.
ModFile string
- // If Overlay is set, the go command is invoked with -overlay=Overlay.
+ // Overlay is the name of the JSON overlay file that describes
+ // unsaved editor buffers; see [WriteOverlays].
+ // If set, the go command is invoked with -overlay=Overlay.
// TODO(rfindley): remove, in favor of Args.
Overlay string
@@ -255,12 +259,15 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
waitDelay.Set(reflect.ValueOf(30 * time.Second))
}
- // On darwin the cwd gets resolved to the real path, which breaks anything that
- // expects the working directory to keep the original path, including the
+ // The cwd gets resolved to the real path. On Darwin, where
+ // /tmp is a symlink, this breaks anything that expects the
+ // working directory to keep the original path, including the
// go command when dealing with modules.
- // The Go stdlib has a special feature where if the cwd and the PWD are the
- // same node then it trusts the PWD, so by setting it in the env for the child
- // process we fix up all the paths returned by the go command.
+ //
+ // os.Getwd has a special feature where if the cwd and the PWD
+ // are the same node then it trusts the PWD, so by setting it
+ // in the env for the child process we fix up all the paths
+ // returned by the go command.
if !i.CleanEnv {
cmd.Env = os.Environ()
}
@@ -351,6 +358,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
}
}
+ startTime := time.Now()
err = cmd.Start()
if stdoutW != nil {
// The child process has inherited the pipe file,
@@ -377,7 +385,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
case err := <-resChan:
return err
case <-timer.C:
- HandleHangingGoCommand(cmd.Process)
+ HandleHangingGoCommand(startTime, cmd)
case <-ctx.Done():
}
} else {
@@ -411,7 +419,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
return <-resChan
}
-func HandleHangingGoCommand(proc *os.Process) {
+func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) {
switch runtime.GOOS {
case "linux", "darwin", "freebsd", "netbsd":
fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
@@ -444,7 +452,7 @@ See golang/go#54461 for more details.`)
panic(fmt.Sprintf("running %s: %v", listFiles, err))
}
}
- panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid))
+ panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid))
}
func cmdDebugStr(cmd *exec.Cmd) string {
@@ -468,3 +476,73 @@ func cmdDebugStr(cmd *exec.Cmd) string {
}
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
}
+
+// WriteOverlays writes each value in the overlay (see the Overlay
+// field of go/packages.Config) to a temporary file and returns the name
+// of a JSON file describing the mapping that is suitable for the "go
+// list -overlay" flag.
+//
+// On success, the caller must call the cleanup function exactly once
+// when the files are no longer needed.
+func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), err error) {
+ // Do nothing if there are no overlays in the config.
+ if len(overlay) == 0 {
+ return "", func() {}, nil
+ }
+
+ dir, err := os.MkdirTemp("", "gocommand-*")
+ if err != nil {
+ return "", nil, err
+ }
+
+ // The caller must clean up this directory,
+ // unless this function returns an error.
+ // (The cleanup operand of each return
+ // statement below is ignored.)
+ defer func() {
+ cleanup = func() {
+ os.RemoveAll(dir)
+ }
+ if err != nil {
+ cleanup()
+ cleanup = nil
+ }
+ }()
+
+ // Write each map entry to a temporary file.
+ overlays := make(map[string]string)
+ for k, v := range overlay {
+ // Use a unique basename for each file (001-foo.go),
+ // to avoid creating nested directories.
+ base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k))
+ filename := filepath.Join(dir, base)
+ err := os.WriteFile(filename, v, 0666)
+ if err != nil {
+ return "", nil, err
+ }
+ overlays[k] = filename
+ }
+
+ // Write the JSON overlay file that maps logical file names to temp files.
+ //
+ // OverlayJSON is the format overlay files are expected to be in.
+ // The Replace map maps from overlaid paths to replacement paths:
+ // the Go command will forward all reads trying to open
+ // each overlaid path to its replacement path, or consider the overlaid
+ // path not to exist if the replacement path is empty.
+ //
+ // From golang/go#39958.
+ type OverlayJSON struct {
+ Replace map[string]string `json:"replace,omitempty"`
+ }
+ b, err := json.Marshal(OverlayJSON{Replace: overlays})
+ if err != nil {
+ return "", nil, err
+ }
+ filename = filepath.Join(dir, "overlay.json")
+ if err := os.WriteFile(filename, b, 0666); err != nil {
+ return "", nil, err
+ }
+
+ return filename, nil, nil
+}
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index 93d49a6efd0..4569313a089 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -104,7 +104,10 @@ type packageInfo struct {
// parseOtherFiles parses all the Go files in srcDir except filename, including
// test files if filename looks like a test.
-func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File {
+//
+// It returns an error only if ctx is cancelled. Files with parse errors are
+// ignored.
+func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename string) ([]*ast.File, error) {
// This could use go/packages but it doesn't buy much, and it fails
// with https://golang.org/issue/26296 in LoadFiles mode in some cases.
considerTests := strings.HasSuffix(filename, "_test.go")
@@ -112,11 +115,14 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File {
fileBase := filepath.Base(filename)
packageFileInfos, err := os.ReadDir(srcDir)
if err != nil {
- return nil
+ return nil, ctx.Err()
}
var files []*ast.File
for _, fi := range packageFileInfos {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
if fi.Name() == fileBase || !strings.HasSuffix(fi.Name(), ".go") {
continue
}
@@ -132,7 +138,7 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File {
files = append(files, f)
}
- return files
+ return files, ctx.Err()
}
// addGlobals puts the names of package vars into the provided map.
@@ -557,12 +563,7 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
// fixImports adds and removes imports from f so that all its references are
// satisfied and there are no unused imports.
-//
-// This is declared as a variable rather than a function so goimports can
-// easily be extended by adding a file with an init function.
-var fixImports = fixImportsDefault
-
-func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
+func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
fixes, err := getFixes(context.Background(), fset, f, filename, env)
if err != nil {
return err
@@ -592,7 +593,10 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
return fixes, nil
}
- otherFiles := parseOtherFiles(fset, srcDir, filename)
+ otherFiles, err := parseOtherFiles(ctx, fset, srcDir, filename)
+ if err != nil {
+ return nil, err
+ }
// Second pass: add information from other files in the same package,
// like their package vars and imports.
@@ -1192,7 +1196,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
if err != nil {
return err
}
- if err = resolver.scan(context.Background(), callback); err != nil {
+ if err = resolver.scan(ctx, callback); err != nil {
return err
}
@@ -1203,7 +1207,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
}
results := make(chan result, len(refs))
- ctx, cancel := context.WithCancel(context.TODO())
+ ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
defer func() {
cancel()
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index 66e8bfdb289..caf8441363e 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "0.182.0"
+const Version = "0.183.0"
diff --git a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go
index 72afd8b000e..c7bef08aadd 100644
--- a/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/type/date/date.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2021 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.12.2
+// protoc v4.24.4
// source: google/type/date.proto
package date
diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go
index 38ef56f73ca..7d57f34b4f5 100644
--- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2021 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.12.2
+// protoc v4.24.4
// source: google/type/expr.proto
package expr
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4d1ccb19517..82f181cd65c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -4,7 +4,7 @@ cloud.google.com/go/internal
cloud.google.com/go/internal/optional
cloud.google.com/go/internal/trace
cloud.google.com/go/internal/version
-# cloud.google.com/go/auth v0.4.2
+# cloud.google.com/go/auth v0.5.1
## explicit; go 1.20
cloud.google.com/go/auth
cloud.google.com/go/auth/credentials
@@ -345,10 +345,10 @@ github.com/pmezard/go-difflib/difflib
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.6.0
+# github.com/prometheus/client_model v0.6.1
## explicit; go 1.19
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.53.0
+# github.com/prometheus/common v0.54.0
## explicit; go 1.20
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
@@ -475,7 +475,7 @@ go.uber.org/zap/internal/stacktrace
go.uber.org/zap/internal/ztest
go.uber.org/zap/zapcore
go.uber.org/zap/zaptest
-# golang.org/x/crypto v0.23.0
+# golang.org/x/crypto v0.24.0
## explicit; go 1.18
golang.org/x/crypto/cast5
golang.org/x/crypto/chacha20
@@ -492,12 +492,12 @@ golang.org/x/crypto/openpgp/errors
golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
golang.org/x/crypto/pbkdf2
-# golang.org/x/mod v0.17.0
+# golang.org/x/mod v0.18.0
## explicit; go 1.18
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.25.0
+# golang.org/x/net v0.26.0
## explicit; go 1.18
golang.org/x/net/context
golang.org/x/net/http/httpguts
@@ -509,7 +509,7 @@ golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.20.0
+# golang.org/x/oauth2 v0.21.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/authhandler
@@ -526,16 +526,16 @@ golang.org/x/oauth2/jwt
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
golang.org/x/sync/singleflight
-# golang.org/x/sys v0.20.0
+# golang.org/x/sys v0.21.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
-# golang.org/x/term v0.20.0
+# golang.org/x/term v0.21.0
## explicit; go 1.18
golang.org/x/term
-# golang.org/x/text v0.15.0
+# golang.org/x/text v0.16.0
## explicit; go 1.18
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
@@ -544,7 +544,7 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.5.0
## explicit; go 1.18
golang.org/x/time/rate
-# golang.org/x/tools v0.21.0
+# golang.org/x/tools v0.22.0
## explicit; go 1.19
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/imports
@@ -559,7 +559,7 @@ golang.org/x/tools/internal/stdlib
# gomodules.xyz/jsonpatch/v2 v2.4.0
## explicit; go 1.20
gomodules.xyz/jsonpatch/v2
-# google.golang.org/api v0.182.0
+# google.golang.org/api v0.183.0
## explicit; go 1.20
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
@@ -578,16 +578,16 @@ google.golang.org/api/transport
google.golang.org/api/transport/grpc
google.golang.org/api/transport/http
google.golang.org/api/transport/http/internal/propagation
-# google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda
-## explicit; go 1.19
+# google.golang.org/genproto v0.0.0-20240528184218-531527333157
+## explicit; go 1.20
google.golang.org/genproto/googleapis/type/date
google.golang.org/genproto/googleapis/type/expr
-# google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8
-## explicit; go 1.19
+# google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e
+## explicit; go 1.20
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157
## explicit; go 1.20
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/errdetails
@@ -1243,16 +1243,16 @@ k8s.io/utils/pointer
k8s.io/utils/ptr
k8s.io/utils/strings/slices
k8s.io/utils/trace
-# knative.dev/hack v0.0.0-20240529131459-3b6d6441e7ea
+# knative.dev/hack v0.0.0-20240607132042-09143140a254
## explicit; go 1.18
knative.dev/hack
-# knative.dev/hack/schema v0.0.0-20240529131459-3b6d6441e7ea
+# knative.dev/hack/schema v0.0.0-20240607132042-09143140a254
## explicit; go 1.18
knative.dev/hack/schema/commands
knative.dev/hack/schema/docs
knative.dev/hack/schema/registry
knative.dev/hack/schema/schema
-# knative.dev/pkg v0.0.0-20240602234151-229e527a1366
+# knative.dev/pkg v0.0.0-20240610120318-15e6cdf2f386
## explicit; go 1.21
knative.dev/pkg/apiextensions/storageversion
knative.dev/pkg/apiextensions/storageversion/cmd/migrate
@@ -1405,7 +1405,7 @@ knative.dev/pkg/webhook/resourcesemantics
knative.dev/pkg/webhook/resourcesemantics/conversion
knative.dev/pkg/webhook/resourcesemantics/defaulting
knative.dev/pkg/webhook/resourcesemantics/validation
-# knative.dev/reconciler-test v0.0.0-20240603135328-1f340aa2b068
+# knative.dev/reconciler-test v0.0.0-20240607131348-2023469d1158
## explicit; go 1.21
knative.dev/reconciler-test/cmd/eventshub
knative.dev/reconciler-test/pkg/environment
From c60bcd048fd32c31106b8aa5322fd2cdfd56b4c2 Mon Sep 17 00:00:00 2001
From: Calum Murray
Date: Tue, 11 Jun 2024 05:12:49 -0400
Subject: [PATCH 04/33] fix: add comments and improve regex for attribute
filter transform function (#7967)
Signed-off-by: Calum Murray
---
pkg/graph/transforms.go | 23 +++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/pkg/graph/transforms.go b/pkg/graph/transforms.go
index 276ba0096a4..911f0324bd7 100644
--- a/pkg/graph/transforms.go
+++ b/pkg/graph/transforms.go
@@ -44,6 +44,10 @@ type AttributesFilterTransform struct {
var _ Transform = &AttributesFilterTransform{}
+// Apply will apply the transform to a given input eventtype. This will "narrow" the eventtype to represent only events which could pass the attribute filter.
+// For example, if the "type" attribute was not yet set on the eventtype, but the filter requires "type"="example.event.type", then after this filter the
+// eventtype would have the attribute definition for "type"="example.event.type". Additionally, if an eventtype can not be narrowed this returns nil. For
+// example using the filter from the earlier example, if the "type" was already set to "other.event.type" then the eventtype would not be compatible.
func (aft *AttributesFilterTransform) Apply(et *eventingv1beta3.EventType, tfc TransformFunctionContext) (*eventingv1beta3.EventType, TransformFunctionContext) {
etAttributes := make(map[string]*eventingv1beta3.EventAttributeDefinition)
for i := range et.Spec.Attributes {
@@ -87,15 +91,27 @@ func (aft *AttributesFilterTransform) Name() string {
return "attributes-filter"
}
+// buildRegexForAttribute build a regex which detects whether the current value for the attribute is compatible
+// with the value required by the attribute filter. Specifically, it will handle variables in the eventtype so
+// that if there are values that can be set in the variable so that it can match the attribute filter, those
+// values will be chosen.
func buildRegexForAttribute(attribute string) (*regexp.Regexp, error) {
chunks := []string{"^"}
var chunk strings.Builder
for i := 0; i < len(attribute); {
- if attribute[i] == '{' {
- chunks = append(chunks, chunk.String(), "[a-zA-Z]+")
+ // handle escaped curly brackets. If not escaped, treat them like part of a variable
+ if attribute[i] == '\\' && i+1 < len(attribute) && (attribute[i+1] == '{' || attribute[i+1] == '}') {
+ chunk.WriteByte(attribute[i+1])
+ i += 2
+ continue
+ } else if attribute[i] == '{' {
+ // this regex allows any character except those disallowed by the cloudevents spec. Technically, it is slightly more permissive
+ // than the cloudevents spec, because creating a regex for code surrogates used properly in pairs is complicated
+ chunks = append(chunks, chunk.String(), "[^\\x{0000}-\\x{001f}\\x{007f}-\\x{009f}\\x{fdd0}-\\x{fdef}\\x{fffe}\\x{ffff}]+")
chunk.Reset()
+ // for variable names we only allow [a-zA-Z0-9]+, so we don't need to worry about escaped characters or brackets in the variable name
offset := strings.Index(attribute[i:], "}")
if offset == -1 {
return nil, fmt.Errorf("no closing bracket for variable")
@@ -103,6 +119,9 @@ func buildRegexForAttribute(attribute string) (*regexp.Regexp, error) {
i += offset + 1
continue
+ } else if attribute[i] == '}' {
+ // we have an unpaired bracket
+ return nil, fmt.Errorf("no opening bracket for a closing bracket. If you want to have a bracket in your value, please escape it with a \\ character")
}
chunk.WriteByte(attribute[i])
From 834d833865ac7373005033a7fc671e231c634461 Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Tue, 11 Jun 2024 17:25:03 +0200
Subject: [PATCH 05/33] Add JobSink data plane symlink in config/ (#7990)
Signed-off-by: Pierangelo Di Pilato
---
config/500-job-sink.yaml | 1 +
1 file changed, 1 insertion(+)
create mode 120000 config/500-job-sink.yaml
diff --git a/config/500-job-sink.yaml b/config/500-job-sink.yaml
new file mode 120000
index 00000000000..75611aca7ed
--- /dev/null
+++ b/config/500-job-sink.yaml
@@ -0,0 +1 @@
+config/core/deployments/job-sink.yaml
\ No newline at end of file
From 0bce743fd87fbb91cbeb4ad0b9bddd998b96c85f Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Tue, 11 Jun 2024 19:03:13 +0200
Subject: [PATCH 06/33] Support arbitrary data for rekt configmap package
(#7991)
This will help with testing the CA trust bundle rotation.
Signed-off-by: Pierangelo Di Pilato
---
.../resources/configmap/config-features.yaml | 15 ++++-
test/rekt/resources/configmap/configmap.go | 13 +++++
.../resources/configmap/configmap_test.go | 55 +++++++++++++++++++
3 files changed, 82 insertions(+), 1 deletion(-)
create mode 100644 test/rekt/resources/configmap/configmap_test.go
diff --git a/test/rekt/resources/configmap/config-features.yaml b/test/rekt/resources/configmap/config-features.yaml
index 6dd98e50f9d..2a56cb9bc04 100644
--- a/test/rekt/resources/configmap/config-features.yaml
+++ b/test/rekt/resources/configmap/config-features.yaml
@@ -3,10 +3,23 @@ kind: ConfigMap
metadata:
name: {{ .name }}
namespace: {{ .namespace }}
+ {{ if .labels }}
+ labels:
+ {{ range $key, $value := .labels }}
+ {{ $key }}: "{{ $value }}"
+ {{ end }}
+ {{ else }}
labels:
knative.dev/config-propagation: original
knative.dev/config-category: eventing
+ {{ end }}
data:
+ {{ if .data }}
+ {{ range $key, $value := .data }}
+ {{ $key }}: |-
+ {{ $value }}
+ {{ end }}
+ {{ else }}
_example: |
my-enabled-flag: "enabled"
my-disabled-flag: "disabled"
@@ -14,4 +27,4 @@ data:
apiserversources-nodeselector-testkey: testvalue
apiserversources-nodeselector-testkey1: testvalue1
apiserversources-nodeselector-testkey2: testvalue2
-
+ {{ end }}
diff --git a/test/rekt/resources/configmap/configmap.go b/test/rekt/resources/configmap/configmap.go
index 8776df0c0e6..f6e31fcd24a 100644
--- a/test/rekt/resources/configmap/configmap.go
+++ b/test/rekt/resources/configmap/configmap.go
@@ -19,6 +19,7 @@ package configmap
import (
"context"
"embed"
+ "strings"
"knative.dev/reconciler-test/pkg/feature"
"knative.dev/reconciler-test/pkg/manifest"
@@ -44,3 +45,15 @@ func Install(name string, ns string, opts ...manifest.CfgFn) feature.StepFn {
}
}
}
+
+var WithLabels = manifest.WithLabels
+
+func WithData(key, value string) manifest.CfgFn {
+ return func(m map[string]interface{}) {
+ if _, ok := m["data"]; !ok {
+ m["data"] = map[string]string{}
+ }
+ value = strings.ReplaceAll(value, "\n", "\n ")
+ m["data"].(map[string]string)[key] = value
+ }
+}
diff --git a/test/rekt/resources/configmap/configmap_test.go b/test/rekt/resources/configmap/configmap_test.go
new file mode 100644
index 00000000000..d38491b9e0c
--- /dev/null
+++ b/test/rekt/resources/configmap/configmap_test.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package configmap
+
+import (
+ "os"
+
+ testlog "knative.dev/reconciler-test/pkg/logging"
+ "knative.dev/reconciler-test/pkg/manifest"
+)
+
+func Example_withData() {
+ ctx := testlog.NewContext()
+ images := map[string]string{}
+ cfg := map[string]interface{}{
+ "name": "foo",
+ "namespace": "bar",
+ }
+
+ WithData("ca.crt", "x\nx")(cfg)
+ WithLabels(map[string]string{"a": "b"})(cfg)
+
+ files, err := manifest.ExecuteYAML(ctx, yaml, images, cfg)
+ if err != nil {
+ panic(err)
+ }
+
+ manifest.OutputYAML(os.Stdout, files)
+ // Output:
+ // apiVersion: v1
+ // kind: ConfigMap
+ // metadata:
+ // name: foo
+ // namespace: bar
+ // labels:
+ // a: "b"
+ // data:
+ // ca.crt: |-
+ // x
+ // x
+}
From b18b1b1f3ffff14ac5b416541107bc9c8f1daf3c Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Tue, 11 Jun 2024 20:50:06 +0200
Subject: [PATCH 07/33] JobSink: add webhook validation for spec.job (#7962)
* JobSink: add webhook validation for spec.job
Signed-off-by: Pierangelo Di Pilato
* Randomize temporary Job name
Signed-off-by: Pierangelo Di Pilato
---------
Signed-off-by: Pierangelo Di Pilato
---
cmd/webhook/main.go | 12 ++++++++++-
config/core/roles/webhook-clusterrole.yaml | 4 ++++
docs/sink/jobsink-invalid.yaml | 21 +++++++++++++++++++
pkg/apis/sinks/register.go | 21 +++++++++++++++++++
.../sinks/v1alpha1/job_sink_validation.go | 20 ++++++++++++++++++
5 files changed, 77 insertions(+), 1 deletion(-)
create mode 100644 docs/sink/jobsink-invalid.yaml
diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go
index 9dd361d90ef..1c9c25fa761 100644
--- a/cmd/webhook/main.go
+++ b/cmd/webhook/main.go
@@ -23,10 +23,12 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
+ kubeclient "knative.dev/pkg/client/injection/kube/client"
configmapinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap/filtered"
eventingv1beta3 "knative.dev/eventing/pkg/apis/eventing/v1beta3"
"knative.dev/eventing/pkg/apis/feature"
+ "knative.dev/eventing/pkg/apis/sinks"
sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
"knative.dev/eventing/pkg/auth"
"knative.dev/eventing/pkg/eventingtls"
@@ -156,9 +158,17 @@ func NewValidationAdmissionController(ctx context.Context, cmw configmap.Watcher
featureStore := feature.NewStore(logging.FromContext(ctx).Named("feature-config-store"))
featureStore.WatchConfigs(cmw)
+ k8s := kubeclient.Get(ctx)
+
// Decorate contexts with the current state of the config.
ctxFunc := func(ctx context.Context) context.Context {
- return featureStore.ToContext(channelStore.ToContext(pingstore.ToContext(store.ToContext(ctx))))
+ return sinks.WithConfig(
+ featureStore.ToContext(
+ channelStore.ToContext(
+ pingstore.ToContext(store.ToContext(ctx)))),
+ &sinks.Config{
+ KubeClient: k8s,
+ })
}
return validation.NewAdmissionController(ctx,
diff --git a/config/core/roles/webhook-clusterrole.yaml b/config/core/roles/webhook-clusterrole.yaml
index ae66a35c05a..9fdeb70a6a9 100644
--- a/config/core/roles/webhook-clusterrole.yaml
+++ b/config/core/roles/webhook-clusterrole.yaml
@@ -175,3 +175,7 @@ rules:
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
+
+ - apiGroups: ["batch"]
+ resources: ["jobs"]
+ verbs: ["create"]
diff --git a/docs/sink/jobsink-invalid.yaml b/docs/sink/jobsink-invalid.yaml
new file mode 100644
index 00000000000..997c466b1ac
--- /dev/null
+++ b/docs/sink/jobsink-invalid.yaml
@@ -0,0 +1,21 @@
+apiVersion: sinks.knative.dev/v1alpha1
+kind: JobSink
+metadata:
+ name: job-sink-invalid
+spec:
+ job:
+ apiVersion: batch/v1
+ kind: Job
+ spec:
+ completions: 12
+ parallelism: 3
+ template:
+ spec:
+ # restartPolicy: Never -> missing field
+ containers:
+ - name: main
+ image: docker.io/library/bash:5
+ command: [ "bash" ] # example command simulating a bug which triggers the FailJob action
+ args:
+ - -c
+ - echo "Hello world!" && sleep 5
diff --git a/pkg/apis/sinks/register.go b/pkg/apis/sinks/register.go
index b37303c509c..676fa75e841 100644
--- a/pkg/apis/sinks/register.go
+++ b/pkg/apis/sinks/register.go
@@ -17,7 +17,10 @@ limitations under the License.
package sinks
import (
+ "context"
+
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/kubernetes"
)
const (
@@ -31,3 +34,21 @@ var (
Resource: "jobsinks",
}
)
+
+type Config struct {
+ KubeClient kubernetes.Interface
+}
+
+type configKey struct{}
+
+func WithConfig(ctx context.Context, cfg *Config) context.Context {
+ return context.WithValue(ctx, configKey{}, cfg)
+}
+
+func GetConfig(ctx context.Context) *Config {
+ v := ctx.Value(configKey{})
+ if v == nil {
+ panic("Missing value for config")
+ }
+ return v.(*Config)
+}
diff --git a/pkg/apis/sinks/v1alpha1/job_sink_validation.go b/pkg/apis/sinks/v1alpha1/job_sink_validation.go
index 0fe178dc65b..7ed631ba3c3 100644
--- a/pkg/apis/sinks/v1alpha1/job_sink_validation.go
+++ b/pkg/apis/sinks/v1alpha1/job_sink_validation.go
@@ -19,10 +19,15 @@ package v1alpha1
import (
"context"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apiserver/pkg/storage/names"
"knative.dev/pkg/apis"
+
+ "knative.dev/eventing/pkg/apis/sinks"
)
func (sink *JobSink) Validate(ctx context.Context) *apis.FieldError {
+ ctx = apis.WithinParent(ctx, sink.ObjectMeta)
return sink.Spec.Validate(ctx).ViaField("spec")
}
@@ -33,5 +38,20 @@ func (sink *JobSinkSpec) Validate(ctx context.Context) *apis.FieldError {
return errs.Also(apis.ErrMissingOneOf("job"))
}
+ if sink.Job != nil {
+ job := sink.Job.DeepCopy()
+ job.Name = names.SimpleNameGenerator.GenerateName(apis.ParentMeta(ctx).Name)
+ _, err := sinks.GetConfig(ctx).KubeClient.
+ BatchV1().
+ Jobs(apis.ParentMeta(ctx).Namespace).
+ Create(ctx, job, metav1.CreateOptions{
+ DryRun: []string{metav1.DryRunAll},
+ FieldValidation: metav1.FieldValidationStrict,
+ })
+ if err != nil {
+ return apis.ErrGeneric(err.Error(), "job")
+ }
+ }
+
return errs
}
From 67ceb9ed29d1fdf9daadb5a7f72688be08a56759 Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Wed, 12 Jun 2024 08:41:10 +0200
Subject: [PATCH 08/33] Move job sink certificate out of config core (#7993)
* Move JobSink Certificate out of config/core
This avoids including `Certificate` in `eventing-core.yaml`
Signed-off-by: Pierangelo Di Pilato
* Fix symlink
Signed-off-by: Pierangelo Di Pilato
---------
Signed-off-by: Pierangelo Di Pilato
---
config/500-job-sink.yaml | 2 +-
config/{core/tls => core-tls}/job-sink-tls-certificate.yaml | 0
hack/generate-yamls.sh | 1 +
3 files changed, 2 insertions(+), 1 deletion(-)
rename config/{core/tls => core-tls}/job-sink-tls-certificate.yaml (100%)
diff --git a/config/500-job-sink.yaml b/config/500-job-sink.yaml
index 75611aca7ed..29401f09225 120000
--- a/config/500-job-sink.yaml
+++ b/config/500-job-sink.yaml
@@ -1 +1 @@
-config/core/deployments/job-sink.yaml
\ No newline at end of file
+core/deployments/job-sink.yaml
\ No newline at end of file
diff --git a/config/core/tls/job-sink-tls-certificate.yaml b/config/core-tls/job-sink-tls-certificate.yaml
similarity index 100%
rename from config/core/tls/job-sink-tls-certificate.yaml
rename to config/core-tls/job-sink-tls-certificate.yaml
diff --git a/hack/generate-yamls.sh b/hack/generate-yamls.sh
index 1bee6f7e3e0..b1ace4e6cba 100755
--- a/hack/generate-yamls.sh
+++ b/hack/generate-yamls.sh
@@ -98,6 +98,7 @@ ko resolve ${KO_YAML_FLAGS} -Rf config/channels/in-memory-channel/ | "${LABEL_YA
# Create eventing TLS yaml
ko resolve ${KO_YAML_FLAGS} \
+ -Rf config/core-tls/ \
-Rf config/channels/in-memory-channel-tls/ \
-Rf config/brokers/mt-channel-broker-tls/ \
| "${LABEL_YAML_CMD[@]}" > "${EVENTING_TLS_YAML}"
From 6259113bc24df84f2f35242051f7e1b280fb9c23 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20St=C3=A4bler?=
Date: Wed, 12 Jun 2024 12:02:33 +0200
Subject: [PATCH 09/33] Fix possible nil pointer dereference in
event-dispatcher (#7994)
* Fix possible nil pointer dereference in event-dispatcher
* readErr -> err
---
pkg/kncloudevents/event_dispatcher.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/pkg/kncloudevents/event_dispatcher.go b/pkg/kncloudevents/event_dispatcher.go
index a9c3bd5cd45..f717ddafd8d 100644
--- a/pkg/kncloudevents/event_dispatcher.go
+++ b/pkg/kncloudevents/event_dispatcher.go
@@ -334,11 +334,11 @@ func (d *Dispatcher) executeRequest(ctx context.Context, target duckv1.Addressab
dispatchInfo.ResponseHeader = response.Header
body := new(bytes.Buffer)
- _, readErr := body.ReadFrom(response.Body)
+ _, err = body.ReadFrom(response.Body)
if isFailure(response.StatusCode) {
// Read response body into dispatchInfo for failures
- if readErr != nil && readErr != io.EOF {
+ if err != nil && err != io.EOF {
dispatchInfo.ResponseBody = []byte(fmt.Sprintf("dispatch resulted in status \"%s\". Could not read response body: error: %s", response.Status, err.Error()))
} else {
dispatchInfo.ResponseBody = body.Bytes()
@@ -350,7 +350,7 @@ func (d *Dispatcher) executeRequest(ctx context.Context, target duckv1.Addressab
}
var responseMessageBody []byte
- if readErr != nil && readErr != io.EOF {
+ if err != nil && err != io.EOF {
responseMessageBody = []byte(fmt.Sprintf("Failed to read response body: %s", err.Error()))
} else {
responseMessageBody = body.Bytes()
From 7f37d64498639449fcebc5d0d63e5f2e94678fc8 Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Wed, 12 Jun 2024 13:27:00 +0200
Subject: [PATCH 10/33] Add EventPolicy types (#7995)
* Add EventPolicy types
Signed-off-by: Pierangelo Di Pilato
* Run hack/update-codegen.sh
Signed-off-by: Pierangelo Di Pilato
* Add EventPolicy CRD
Signed-off-by: Pierangelo Di Pilato
* Fix boilerplate / style
Signed-off-by: Pierangelo Di Pilato
---------
Signed-off-by: Pierangelo Di Pilato
---
cmd/schema/main.go | 2 +
config/core/resources/eventpolicy.yaml | 181 +++++++
docs/eventing-api.md | 454 ++++++++++++++++++
hack/update-codegen.sh | 4 +-
pkg/apis/eventing/v1alpha1/doc.go | 20 +
.../v1alpha1/eventpolicy_conversion.go | 34 ++
.../v1alpha1/eventpolicy_conversion_test.go | 34 ++
.../eventing/v1alpha1/eventpolicy_defaults.go | 31 ++
.../v1alpha1/eventpolicy_defaults_test.go | 44 ++
.../v1alpha1/eventpolicy_lifecycle.go | 52 ++
.../v1alpha1/eventpolicy_lifecycle_test.go | 107 +++++
.../eventing/v1alpha1/eventpolicy_types.go | 168 +++++++
.../v1alpha1/eventpolicy_types_test.go | 39 ++
.../v1alpha1/eventpolicy_validation.go | 86 ++++
.../v1alpha1/eventpolicy_validation_test.go | 209 ++++++++
pkg/apis/eventing/v1alpha1/register.go | 53 ++
pkg/apis/eventing/v1alpha1/register_test.go | 52 ++
.../v1alpha1/zz_generated.deepcopy.go | 250 ++++++++++
pkg/client/clientset/versioned/clientset.go | 31 +-
.../versioned/fake/clientset_generated.go | 7 +
.../clientset/versioned/fake/register.go | 2 +
.../clientset/versioned/scheme/register.go | 2 +
.../versioned/typed/eventing/v1alpha1/doc.go | 20 +
.../eventing/v1alpha1/eventing_client.go | 107 +++++
.../typed/eventing/v1alpha1/eventpolicy.go | 195 ++++++++
.../typed/eventing/v1alpha1/fake/doc.go | 20 +
.../v1alpha1/fake/fake_eventing_client.go | 40 ++
.../v1alpha1/fake/fake_eventpolicy.go | 141 ++++++
.../eventing/v1alpha1/generated_expansion.go | 21 +
.../externalversions/eventing/interface.go | 8 +
.../eventing/v1alpha1/eventpolicy.go | 90 ++++
.../eventing/v1alpha1/interface.go | 45 ++
.../informers/externalversions/generic.go | 9 +-
.../v1alpha1/eventpolicy/eventpolicy.go | 52 ++
.../v1alpha1/eventpolicy/fake/fake.go | 40 ++
.../eventpolicy/filtered/eventpolicy.go | 65 +++
.../eventpolicy/filtered/fake/fake.go | 52 ++
.../v1alpha1/eventpolicy/controller.go | 170 +++++++
.../v1alpha1/eventpolicy/reconciler.go | 440 +++++++++++++++++
.../eventing/v1alpha1/eventpolicy/state.go | 97 ++++
.../listers/eventing/v1alpha1/eventpolicy.go | 99 ++++
.../eventing/v1alpha1/expansion_generated.go | 27 ++
42 files changed, 3587 insertions(+), 13 deletions(-)
create mode 100644 config/core/resources/eventpolicy.yaml
create mode 100644 pkg/apis/eventing/v1alpha1/doc.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventpolicy_conversion.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventpolicy_conversion_test.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventpolicy_defaults.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventpolicy_defaults_test.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle_test.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventpolicy_types.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventpolicy_types_test.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventpolicy_validation.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventpolicy_validation_test.go
create mode 100644 pkg/apis/eventing/v1alpha1/register.go
create mode 100644 pkg/apis/eventing/v1alpha1/register_test.go
create mode 100644 pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
create mode 100644 pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go
create mode 100644 pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
create mode 100644 pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventpolicy.go
create mode 100644 pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/doc.go
create mode 100644 pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go
create mode 100644 pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventpolicy.go
create mode 100644 pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
create mode 100644 pkg/client/informers/externalversions/eventing/v1alpha1/eventpolicy.go
create mode 100644 pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
create mode 100644 pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/eventpolicy.go
create mode 100644 pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/fake/fake.go
create mode 100644 pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/filtered/eventpolicy.go
create mode 100644 pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/filtered/fake/fake.go
create mode 100644 pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/controller.go
create mode 100644 pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/reconciler.go
create mode 100644 pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/state.go
create mode 100644 pkg/client/listers/eventing/v1alpha1/eventpolicy.go
create mode 100644 pkg/client/listers/eventing/v1alpha1/expansion_generated.go
diff --git a/cmd/schema/main.go b/cmd/schema/main.go
index 62b58d42937..2c38476a3cb 100644
--- a/cmd/schema/main.go
+++ b/cmd/schema/main.go
@@ -23,6 +23,7 @@ import (
"knative.dev/hack/schema/registry"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
flowsv1 "knative.dev/eventing/pkg/apis/flows/v1"
messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
@@ -47,6 +48,7 @@ func main() {
// Flows
registry.Register(&flowsv1.Sequence{})
registry.Register(&flowsv1.Parallel{})
+ registry.Register(&eventingv1alpha1.EventPolicy{})
if err := commands.New("knative.dev/eventing").Execute(); err != nil {
log.Fatal("Error during command execution: ", err)
diff --git a/config/core/resources/eventpolicy.yaml b/config/core/resources/eventpolicy.yaml
new file mode 100644
index 00000000000..159d379e5bb
--- /dev/null
+++ b/config/core/resources/eventpolicy.yaml
@@ -0,0 +1,181 @@
+# Copyright 2024 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: eventpolicies.eventing.knative.dev
+ labels:
+ knative.dev/crd-install: "true"
+ app.kubernetes.io/version: devel
+ app.kubernetes.io/name: knative-eventing
+spec:
+ group: eventing.knative.dev
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ type: object
+ properties:
+ spec:
+ description: Spec defines the desired state of the EventPolicy.
+ type: object
+ properties:
+ from:
+ description: From is the list of sources or oidc identities, which are allowed to send events to the targets (.spec.to).
+ type: array
+ items:
+ type: object
+ properties:
+ ref:
+ description: Ref contains a direct reference to a resource which is allowed to send events to the target.
+ type: object
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ This is optional field, it gets defaulted to the object holding it if left out.'
+ type: string
+ sub:
+ description: Sub sets the OIDC identity name to be allowed to send events to the target. It is also possible to set a glob-like pattern to match any suffix.
+ type: string
+ to:
+ description: To lists all resources for which this policy applies. Resources in this list must act like an ingress and have an audience. The resources are part of the same namespace as the EventPolicy. An empty list means it applies to all resources in the EventPolicies namespace
+ type: array
+ items:
+ type: object
+ properties:
+ ref:
+ description: Ref contains the direct reference to a target
+ type: object
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ selector:
+ description: Selector contains a selector to group targets
+ type: object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array
+ items:
+ type: object
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ status:
+ description: Status represents the current state of the EventPolicy. This data may be out of date.
+ type: object
+ properties:
+ annotations:
+ description: Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ conditions:
+ description: Conditions the latest available observations of a resource's current state.
+ type: array
+ items:
+ type: object
+ required:
+ - type
+ - status
+ properties:
+ lastTransitionTime:
+ description: LastTransitionTime is the last time the condition transitioned from one status to another. We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic differences (all other things held constant).
+ type: string
+ message:
+ description: A human readable message indicating details about the transition.
+ type: string
+ reason:
+ description: The reason for the condition's last transition.
+ type: string
+ severity:
+ description: Severity with which to treat failures of this type of condition. When this is not specified, it defaults to Error.
+ type: string
+ status:
+ description: Status of the condition, one of True, False, Unknown.
+ type: string
+ type:
+ description: Type of condition.
+ type: string
+ from:
+ description: From is the list of resolved oidc identities from .spec.from
+ type: array
+ items:
+ type: string
+ observedGeneration:
+ description: ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.
+ type: integer
+ format: int64
+
+ additionalPrinterColumns:
+ - name: Ready
+ type: string
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ - name: Reason
+ type: string
+ jsonPath: ".status.conditions[?(@.type==\"Ready\")].reason"
+ names:
+ kind: EventPolicy
+ plural: eventpolicies
+ singular: eventpolicy
+ categories:
+ - all
+ - knative
+ - eventing
+ scope: Namespaced
+ conversion:
+ strategy: Webhook
+ webhook:
+ conversionReviewVersions: ["v1", "v1beta1"]
+ clientConfig:
+ service:
+ name: eventing-webhook
+ namespace: knative-eventing
diff --git a/docs/eventing-api.md b/docs/eventing-api.md
index 6bec7a309d8..091344ae66c 100644
--- a/docs/eventing-api.md
+++ b/docs/eventing-api.md
@@ -13,6 +13,9 @@
eventing.knative.dev/v1
+eventing.knative.dev/v1alpha1
+
+
eventing.knative.dev/v1beta1
@@ -2488,6 +2491,457 @@ knative.dev/pkg/apis/duck/v1.AuthStatus
+eventing.knative.dev/v1alpha1
+
+
Package v1alpha1 is the v1alpha1 version of the API.
+
+Resource Types:
+
+EventPolicy
+
+
+
EventPolicy represents a policy for addressable resources (Broker, Channel, sinks).
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+eventing.knative.dev/v1alpha1
+
+ |
+
+
+
+kind
+string
+ |
+EventPolicy |
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+(Optional)
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+EventPolicySpec
+
+
+ |
+
+ Spec defines the desired state of the EventPolicy.
+
+
+
+
+
+to
+
+
+[]EventPolicySpecTo
+
+
+ |
+
+(Optional)
+ To lists all resources for which this policy applies.
+Resources in this list must act like an ingress and have an audience.
+The resources are part of the same namespace as the EventPolicy.
+An empty list means it applies to all resources in the EventPolicies namespace
+ |
+
+
+
+from
+
+
+[]EventPolicySpecFrom
+
+
+ |
+
+ From is the list of sources or oidc identities, which are allowed to send events to the targets (.spec.to).
+ |
+
+
+ |
+
+
+
+status
+
+
+EventPolicyStatus
+
+
+ |
+
+(Optional)
+ Status represents the current state of the EventPolicy.
+This data may be out of date.
+ |
+
+
+
+EventPolicyFromReference
+
+
+(Appears on:EventPolicySpecFrom)
+
+
+
+
+EventPolicySelector
+
+
+(Appears on:EventPolicySpecTo)
+
+
+
+
+EventPolicySpec
+
+
+(Appears on:EventPolicy)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+to
+
+
+[]EventPolicySpecTo
+
+
+ |
+
+(Optional)
+ To lists all resources for which this policy applies.
+Resources in this list must act like an ingress and have an audience.
+The resources are part of the same namespace as the EventPolicy.
+An empty list means it applies to all resources in the EventPolicies namespace
+ |
+
+
+
+from
+
+
+[]EventPolicySpecFrom
+
+
+ |
+
+ From is the list of sources or oidc identities, which are allowed to send events to the targets (.spec.to).
+ |
+
+
+
+EventPolicySpecFrom
+
+
+(Appears on:EventPolicySpec)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+ref
+
+
+EventPolicyFromReference
+
+
+ |
+
+(Optional)
+ Ref contains a direct reference to a resource which is allowed to send events to the target.
+ |
+
+
+
+sub
+
+string
+
+ |
+
+(Optional)
+ Sub sets the OIDC identity name to be allowed to send events to the target.
+It is also possible to set a glob-like pattern to match any suffix.
+ |
+
+
+
+EventPolicySpecTo
+
+
+(Appears on:EventPolicySpec)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+ref
+
+
+EventPolicyToReference
+
+
+ |
+
+(Optional)
+ Ref contains the direct reference to a target
+ |
+
+
+
+selector
+
+
+EventPolicySelector
+
+
+ |
+
+(Optional)
+ Selector contains a selector to group targets
+ |
+
+
+
+EventPolicyStatus
+
+
+(Appears on:EventPolicy)
+
+
+
EventPolicyStatus represents the current state of a EventPolicy.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+Status
+
+
+knative.dev/pkg/apis/duck/v1.Status
+
+
+ |
+
+
+(Members of Status are embedded into this type.)
+
+inherits duck/v1 Status, which currently provides:
+* ObservedGeneration - the ‘Generation’ of the Service that was last processed by the controller.
+* Conditions - the latest available observations of a resource’s current state.
+ |
+
+
+
+from
+
+[]string
+
+ |
+
+ From is the list of resolved oidc identities from .spec.from
+ |
+
+
+
+EventPolicyToReference
+
+
+(Appears on:EventPolicySpecTo)
+
+
+
+
+
eventing.knative.dev/v1beta1
Package v1beta1 is the v1beta1 version of the API.
diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh
index 3020dd85815..fdeb803f9ca 100755
--- a/hack/update-codegen.sh
+++ b/hack/update-codegen.sh
@@ -38,7 +38,7 @@ group "Kubernetes Codegen"
# instead of the $GOPATH directly. For normal projects this can be dropped.
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
knative.dev/eventing/pkg/client knative.dev/eventing/pkg/apis \
- "sinks:v1alpha1 eventing:v1beta1 eventing:v1beta2 eventing:v1beta3 eventing:v1 messaging:v1 flows:v1 sources:v1beta2 sources:v1" \
+ "sinks:v1alpha1 eventing:v1alpha1 eventing:v1beta1 eventing:v1beta2 eventing:v1beta3 eventing:v1 messaging:v1 flows:v1 sources:v1beta2 sources:v1" \
--go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt
# Deep copy config
@@ -59,7 +59,7 @@ group "Knative Codegen"
# Knative Injection
${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \
knative.dev/eventing/pkg/client knative.dev/eventing/pkg/apis \
- "sinks:v1alpha1 eventing:v1beta1 eventing:v1beta2 eventing:v1beta3 eventing:v1 messaging:v1 flows:v1 sources:v1beta2 sources:v1 duck:v1beta1 duck:v1" \
+ "sinks:v1alpha1 eventing:v1alpha1 eventing:v1beta1 eventing:v1beta2 eventing:v1beta3 eventing:v1 messaging:v1 flows:v1 sources:v1beta2 sources:v1 duck:v1beta1 duck:v1" \
--go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt
group "Generating API reference docs"
diff --git a/pkg/apis/eventing/v1alpha1/doc.go b/pkg/apis/eventing/v1alpha1/doc.go
new file mode 100644
index 00000000000..1f222246309
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 is the v1alpha1 version of the API.
+// +k8s:deepcopy-gen=package
+// +groupName=eventing.knative.dev
+package v1alpha1
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_conversion.go b/pkg/apis/eventing/v1alpha1/eventpolicy_conversion.go
new file mode 100644
index 00000000000..ed74482e2d4
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (ep *EventPolicy) ConvertTo(ctx context.Context, obj apis.Convertible) error {
+ return fmt.Errorf("v1alpha1 is the highest known version, got: %T", obj)
+}
+
+// ConvertFrom implements apis.Convertible
+func (ep *EventPolicy) ConvertFrom(ctx context.Context, obj apis.Convertible) error {
+ return fmt.Errorf("v1alpha1 is the highest known version, got: %T", obj)
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_conversion_test.go b/pkg/apis/eventing/v1alpha1/eventpolicy_conversion_test.go
new file mode 100644
index 00000000000..31188a862ee
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_conversion_test.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+ "testing"
+)
+
+func TestEventPolicyConversionHighestVersion(t *testing.T) {
+ good, bad := &EventPolicy{}, &EventPolicy{}
+
+ if err := good.ConvertTo(context.Background(), bad); err == nil {
+ t.Errorf("ConvertTo() = %#v, wanted error", bad)
+ }
+
+ if err := good.ConvertFrom(context.Background(), bad); err == nil {
+ t.Errorf("ConvertFrom() = %#v, wanted error", good)
+ }
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_defaults.go b/pkg/apis/eventing/v1alpha1/eventpolicy_defaults.go
new file mode 100644
index 00000000000..3e52f5bd482
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_defaults.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+)
+
+func (ep *EventPolicy) SetDefaults(ctx context.Context) {
+ ctx = apis.WithinParent(ctx, ep.ObjectMeta)
+ ep.Spec.SetDefaults(ctx)
+}
+
+func (ets *EventPolicySpec) SetDefaults(ctx context.Context) {
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_defaults_test.go b/pkg/apis/eventing/v1alpha1/eventpolicy_defaults_test.go
new file mode 100644
index 00000000000..09a377b082b
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_defaults_test.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+func TestEventPolicyDefaults(t *testing.T) {
+ testCases := map[string]struct {
+ initial EventPolicy
+ expected EventPolicy
+ }{
+ "nil spec": {
+ initial: EventPolicy{},
+ expected: EventPolicy{},
+ },
+ }
+ for n, tc := range testCases {
+ t.Run(n, func(t *testing.T) {
+ tc.initial.SetDefaults(context.TODO())
+ if diff := cmp.Diff(tc.expected, tc.initial); diff != "" {
+ t.Fatal("Unexpected defaults (-want, +got):", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle.go b/pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle.go
new file mode 100644
index 00000000000..30c8575eac9
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "knative.dev/pkg/apis"
+)
+
+var eventPolicyCondSet = apis.NewLivingConditionSet()
+
+const (
+ EventPolicyConditionReady = apis.ConditionReady
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*EventPolicy) GetConditionSet() apis.ConditionSet {
+ return eventPolicyCondSet
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (et *EventPolicyStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return eventPolicyCondSet.Manage(et).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (et *EventPolicyStatus) IsReady() bool {
+ return et.GetTopLevelCondition().IsTrue()
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (et *EventPolicyStatus) GetTopLevelCondition() *apis.Condition {
+ return eventPolicyCondSet.Manage(et).GetTopLevelCondition()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (et *EventPolicyStatus) InitializeConditions() {
+ eventPolicyCondSet.Manage(et).InitializeConditions()
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle_test.go b/pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle_test.go
new file mode 100644
index 00000000000..1f18f054a1f
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle_test.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+
+ corev1 "k8s.io/api/core/v1"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+var (
+ eventPolicyConditionReady = apis.Condition{
+ Type: EventPolicyConditionReady,
+ Status: corev1.ConditionTrue,
+ }
+
+ ignoreAllButTypeAndStatus = cmpopts.IgnoreFields(
+ apis.Condition{},
+ "LastTransitionTime", "Message", "Reason", "Severity")
+)
+
+func TestEventPolicyGetConditionSet(t *testing.T) {
+ r := &EventPolicy{}
+
+ if got, want := r.GetConditionSet().GetTopLevelConditionType(), apis.ConditionReady; got != want {
+ t.Errorf("GetTopLevelCondition=%v, want=%v", got, want)
+ }
+}
+
+func TestEventPolicyGetCondition(t *testing.T) {
+ tests := []struct {
+ name string
+ ets *EventPolicyStatus
+ condQuery apis.ConditionType
+ want *apis.Condition
+ }{{
+ name: "single condition",
+ ets: &EventPolicyStatus{
+ Status: duckv1.Status{
+ Conditions: []apis.Condition{
+ eventPolicyConditionReady,
+ },
+ },
+ },
+ condQuery: apis.ConditionReady,
+ want: &eventPolicyConditionReady,
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.ets.GetCondition(test.condQuery)
+ if diff := cmp.Diff(test.want, got); diff != "" {
+ t.Error("unexpected condition (-want, +got) =", diff)
+ }
+ })
+ }
+}
+
+func TestEventPolicyInitializeConditions(t *testing.T) {
+ tests := []struct {
+ name string
+ ets *EventPolicyStatus
+ want *EventPolicyStatus
+ }{
+ {
+ name: "empty",
+ ets: &EventPolicyStatus{},
+ want: &EventPolicyStatus{
+ Status: duckv1.Status{
+ Conditions: []apis.Condition{{
+ Type: EventPolicyConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ test.ets.InitializeConditions()
+ if diff := cmp.Diff(test.want, test.ets, ignoreAllButTypeAndStatus); diff != "" {
+ t.Error("unexpected conditions (-want, +got) =", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_types.go b/pkg/apis/eventing/v1alpha1/eventpolicy_types.go
new file mode 100644
index 00000000000..53d62653444
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_types.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventPolicy represents a policy for addressable resources (Broker, Channel, sinks).
+type EventPolicy struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the EventPolicy.
+ Spec EventPolicySpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the EventPolicy.
+ // This data may be out of date.
+ // +optional
+ Status EventPolicyStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that EventPolicy can be validated, can be defaulted, and has immutable fields.
+ _ apis.Validatable = (*EventPolicy)(nil)
+ _ apis.Defaultable = (*EventPolicy)(nil)
+
+ // Check that EventPolicy can return its spec untyped.
+ _ apis.HasSpec = (*EventPolicy)(nil)
+
+ _ runtime.Object = (*EventPolicy)(nil)
+
+ // Check that we can create OwnerReferences to an EventPolicy.
+ _ kmeta.OwnerRefable = (*EventPolicy)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*EventPolicy)(nil)
+)
+
+type EventPolicySpec struct {
+ // To lists all resources for which this policy applies.
+ // Resources in this list must act like an ingress and have an audience.
+ // The resources are part of the same namespace as the EventPolicy.
+ // An empty list means it applies to all resources in the EventPolicies namespace
+ // +optional
+ To []EventPolicySpecTo `json:"to,omitempty"`
+
+ // From is the list of sources or oidc identities, which are allowed to send events to the targets (.spec.to).
+ From []EventPolicySpecFrom `json:"from,omitempty"`
+}
+
+type EventPolicySpecTo struct {
+ // Ref contains the direct reference to a target
+ // +optional
+ Ref *EventPolicyToReference `json:"ref,omitempty"`
+
+ // Selector contains a selector to group targets
+ // +optional
+ Selector *EventPolicySelector `json:"selector,omitempty"`
+}
+
+type EventPolicySpecFrom struct {
+ // Ref contains a direct reference to a resource which is allowed to send events to the target.
+ // +optional
+ Ref *EventPolicyFromReference `json:"ref,omitempty"`
+
+ // Sub sets the OIDC identity name to be allowed to send events to the target.
+ // It is also possible to set a glob-like pattern to match any suffix.
+ // +optional
+ Sub *string `json:"sub,omitempty"`
+}
+
+type EventPolicyToReference struct {
+ // API version of the referent.
+ APIVersion string `json:"apiVersion,omitempty"`
+
+ // Kind of the referent.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind string `json:"kind"`
+
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name string `json:"name"`
+}
+
+type EventPolicyFromReference struct {
+ // API version of the referent.
+ APIVersion string `json:"apiVersion,omitempty"`
+
+ // Kind of the referent.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind string `json:"kind"`
+
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name string `json:"name"`
+
+ // Namespace of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+	// This is an optional field, it gets defaulted to the object holding it if left out.
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+}
+
+type EventPolicySelector struct {
+ *metav1.LabelSelector `json:",inline"`
+ *metav1.TypeMeta `json:",inline"`
+}
+
+// EventPolicyStatus represents the current state of an EventPolicy.
+type EventPolicyStatus struct {
+ // inherits duck/v1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1.Status `json:",inline"`
+
+ // From is the list of resolved oidc identities from .spec.from
+ From []string `json:"from,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventPolicyList is a collection of EventPolicy.
+type EventPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []EventPolicy `json:"items"`
+}
+
+// GetGroupVersionKind returns GroupVersionKind for EventPolicy
+func (ep *EventPolicy) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("EventPolicy")
+}
+
+// GetUntypedSpec returns the spec of the EventPolicy.
+func (ep *EventPolicy) GetUntypedSpec() interface{} {
+ return ep.Spec
+}
+
+// GetStatus retrieves the status of the EventPolicy. Implements the KRShaped interface.
+func (ep *EventPolicy) GetStatus() *duckv1.Status {
+ return &ep.Status.Status
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_types_test.go b/pkg/apis/eventing/v1alpha1/eventpolicy_types_test.go
new file mode 100644
index 00000000000..b8dd6913ebe
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_types_test.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "testing"
+)
+
+func TestEventPolicyGetStatus(t *testing.T) {
+ r := &EventPolicy{
+ Status: EventPolicyStatus{},
+ }
+ if got, want := r.GetStatus(), &r.Status.Status; got != want {
+ t.Errorf("GetStatus=%v, want=%v", got, want)
+ }
+}
+
+func TestEventPolicy_GetGroupVersionKind(t *testing.T) {
+ src := EventPolicy{}
+ gvk := src.GetGroupVersionKind()
+
+ if gvk.Kind != "EventPolicy" {
+ t.Errorf("Should be EventPolicy.")
+ }
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go b/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go
new file mode 100644
index 00000000000..6c4eafb5caa
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+)
+
+func (ep *EventPolicy) Validate(ctx context.Context) *apis.FieldError {
+ return ep.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (ets *EventPolicySpec) Validate(ctx context.Context) *apis.FieldError {
+ var err *apis.FieldError
+ for i, f := range ets.From {
+ if f.Ref == nil && (f.Sub == nil || *f.Sub == "") {
+ err = err.Also(apis.ErrMissingOneOf("ref", "sub").ViaFieldIndex("from", i))
+ }
+ if f.Ref != nil && f.Sub != nil {
+ err = err.Also(apis.ErrMultipleOneOf("ref", "sub").ViaFieldIndex("from", i))
+ }
+ err = err.Also(f.Ref.Validate().ViaField("ref").ViaFieldIndex("from", i))
+ }
+
+ for i, t := range ets.To {
+ if t.Ref == nil && t.Selector == nil {
+ err = err.Also(apis.ErrMissingOneOf("ref", "selector").ViaFieldIndex("to", i))
+ }
+ if t.Ref != nil && t.Selector != nil {
+ err = err.Also(apis.ErrMultipleOneOf("ref", "selector").ViaFieldIndex("to", i))
+ }
+ if t.Ref != nil {
+ err = err.Also(t.Ref.Validate().ViaField("ref").ViaFieldIndex("to", i))
+ }
+ }
+
+ return err
+}
+
+func (r *EventPolicyFromReference) Validate() *apis.FieldError {
+ if r == nil {
+ return nil
+ }
+
+ var err *apis.FieldError
+ if r.Kind == "" {
+ err = err.Also(apis.ErrMissingField("kind"))
+ }
+ if r.APIVersion == "" {
+ err = err.Also(apis.ErrMissingField("apiVersion"))
+ }
+ if r.Name == "" {
+ err = err.Also(apis.ErrMissingField("name"))
+ }
+ return err
+}
+
+func (r *EventPolicyToReference) Validate() *apis.FieldError {
+ var err *apis.FieldError
+ if r.Kind == "" {
+ err = err.Also(apis.ErrMissingField("kind"))
+ }
+ if r.APIVersion == "" {
+ err = err.Also(apis.ErrMissingField("apiVersion"))
+ }
+ if r.Name == "" {
+ err = err.Also(apis.ErrMissingField("name"))
+ }
+ return err
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_validation_test.go b/pkg/apis/eventing/v1alpha1/eventpolicy_validation_test.go
new file mode 100644
index 00000000000..c4b388b0291
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_validation_test.go
@@ -0,0 +1,209 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/ptr"
+)
+
+func TestEventPolicySpecValidation(t *testing.T) {
+ tests := []struct {
+ name string
+ ep *EventPolicy
+ want *apis.FieldError
+ }{
+ {
+ name: "valid, empty",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{},
+ },
+ want: func() *apis.FieldError {
+ return nil
+ }(),
+ },
+ {
+ name: "invalid, missing from.ref and from.sub",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ From: []EventPolicySpecFrom{{}},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrMissingOneOf("ref", "sub").ViaFieldIndex("from", 0).ViaField("spec")
+ }(),
+ },
+ {
+ name: "invalid, from.ref missing name",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ From: []EventPolicySpecFrom{{
+ Ref: &EventPolicyFromReference{
+ APIVersion: "a",
+ Kind: "b",
+ },
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrMissingField("name").ViaField("ref").ViaFieldIndex("from", 0).ViaField("spec")
+ }(),
+ },
+ {
+ name: "invalid, from.ref missing kind",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ From: []EventPolicySpecFrom{{
+ Ref: &EventPolicyFromReference{
+ APIVersion: "a",
+ Name: "b",
+ },
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrMissingField("kind").ViaField("ref").ViaFieldIndex("from", 0).ViaField("spec")
+ }(),
+ },
+ {
+ name: "invalid, from.ref missing apiVersion",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ From: []EventPolicySpecFrom{{
+ Ref: &EventPolicyFromReference{
+ Kind: "a",
+ Name: "b",
+ },
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrMissingField("apiVersion").ViaField("ref").ViaFieldIndex("from", 0).ViaField("spec")
+ }(),
+ },
+ {
+			name: "invalid, both from.ref and from.sub set",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ From: []EventPolicySpecFrom{{
+ Ref: &EventPolicyFromReference{
+ APIVersion: "a",
+ Kind: "b",
+ Name: "c",
+ },
+ Sub: ptr.String("abc"),
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrMultipleOneOf("ref", "sub").ViaFieldIndex("from", 0).ViaField("spec")
+ }(),
+ },
+ {
+ name: "invalid, missing to.ref and to.selector",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ To: []EventPolicySpecTo{{}},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrMissingOneOf("ref", "selector").ViaFieldIndex("to", 0).ViaField("spec")
+ }(),
+ },
+ {
+ name: "invalid, both to.ref and to.selector set",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ To: []EventPolicySpecTo{
+ {
+ Ref: &EventPolicyToReference{
+ APIVersion: "a",
+ Kind: "b",
+ Name: "c",
+ },
+ Selector: &EventPolicySelector{},
+ },
+ },
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrMultipleOneOf("ref", "selector").ViaFieldIndex("to", 0).ViaField("spec")
+ }(),
+ },
+ {
+ name: "invalid, to.ref missing name",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ To: []EventPolicySpecTo{{
+ Ref: &EventPolicyToReference{
+ APIVersion: "a",
+ Kind: "b",
+ },
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrMissingField("name").ViaField("ref").ViaFieldIndex("to", 0).ViaField("spec")
+ }(),
+ },
+ {
+ name: "invalid, to.ref missing kind",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ To: []EventPolicySpecTo{{
+ Ref: &EventPolicyToReference{
+ APIVersion: "a",
+ Name: "b",
+ },
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrMissingField("kind").ViaField("ref").ViaFieldIndex("to", 0).ViaField("spec")
+ }(),
+ },
+ {
+ name: "invalid, to.ref missing apiVersion",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ To: []EventPolicySpecTo{{
+ Ref: &EventPolicyToReference{
+ Kind: "a",
+ Name: "b",
+ },
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrMissingField("apiVersion").ViaField("ref").ViaFieldIndex("to", 0).ViaField("spec")
+ }(),
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.ep.Validate(context.TODO())
+ if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" {
+ t.Errorf("%s: Validate EventPolicySpec (-want, +got) = %v", test.name, diff)
+ }
+ })
+ }
+}
diff --git a/pkg/apis/eventing/v1alpha1/register.go b/pkg/apis/eventing/v1alpha1/register.go
new file mode 100644
index 00000000000..c6f3e98cd0b
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "knative.dev/eventing/pkg/apis/eventing"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: eventing.GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &EventPolicy{},
+ &EventPolicyList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/pkg/apis/eventing/v1alpha1/register_test.go b/pkg/apis/eventing/v1alpha1/register_test.go
new file mode 100644
index 00000000000..8fb9e403809
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/register_test.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "testing"
+
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestKind(t *testing.T) {
+ schemaGroupKind := Kind("EventPolicy")
+ if schemaGroupKind.Kind != "EventPolicy" || schemaGroupKind.Group != "eventing.knative.dev" {
+ t.Errorf("Unexpected GroupKind: %+v", schemaGroupKind)
+ }
+}
+
+func TestResource(t *testing.T) {
+ schemaGroupResource := Resource("EventPolicy")
+ if schemaGroupResource.Group != "eventing.knative.dev" || schemaGroupResource.Resource != "EventPolicy" {
+ t.Errorf("Unexpected GroupResource: %+v", schemaGroupResource)
+ }
+}
+
+func TestKnownTypes(t *testing.T) {
+ scheme := runtime.NewScheme()
+ addKnownTypes(scheme)
+ types := scheme.KnownTypes(SchemeGroupVersion)
+
+ for _, name := range []string{
+ "EventPolicy",
+ "EventPolicyList",
+ } {
+ if _, ok := types[name]; !ok {
+ t.Errorf("Did not find %q as registered type", name)
+ }
+ }
+}
diff --git a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..998b577cad3
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,250 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventPolicy) DeepCopyInto(out *EventPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventPolicy.
+func (in *EventPolicy) DeepCopy() *EventPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(EventPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventPolicyFromReference) DeepCopyInto(out *EventPolicyFromReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventPolicyFromReference.
+func (in *EventPolicyFromReference) DeepCopy() *EventPolicyFromReference {
+ if in == nil {
+ return nil
+ }
+ out := new(EventPolicyFromReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventPolicyList) DeepCopyInto(out *EventPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EventPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventPolicyList.
+func (in *EventPolicyList) DeepCopy() *EventPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(EventPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventPolicySelector) DeepCopyInto(out *EventPolicySelector) {
+ *out = *in
+ if in.LabelSelector != nil {
+ in, out := &in.LabelSelector, &out.LabelSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TypeMeta != nil {
+ in, out := &in.TypeMeta, &out.TypeMeta
+ *out = new(v1.TypeMeta)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventPolicySelector.
+func (in *EventPolicySelector) DeepCopy() *EventPolicySelector {
+ if in == nil {
+ return nil
+ }
+ out := new(EventPolicySelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventPolicySpec) DeepCopyInto(out *EventPolicySpec) {
+ *out = *in
+ if in.To != nil {
+ in, out := &in.To, &out.To
+ *out = make([]EventPolicySpecTo, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = make([]EventPolicySpecFrom, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventPolicySpec.
+func (in *EventPolicySpec) DeepCopy() *EventPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EventPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventPolicySpecFrom) DeepCopyInto(out *EventPolicySpecFrom) {
+ *out = *in
+ if in.Ref != nil {
+ in, out := &in.Ref, &out.Ref
+ *out = new(EventPolicyFromReference)
+ **out = **in
+ }
+ if in.Sub != nil {
+ in, out := &in.Sub, &out.Sub
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventPolicySpecFrom.
+func (in *EventPolicySpecFrom) DeepCopy() *EventPolicySpecFrom {
+ if in == nil {
+ return nil
+ }
+ out := new(EventPolicySpecFrom)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventPolicySpecTo) DeepCopyInto(out *EventPolicySpecTo) {
+ *out = *in
+ if in.Ref != nil {
+ in, out := &in.Ref, &out.Ref
+ *out = new(EventPolicyToReference)
+ **out = **in
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(EventPolicySelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventPolicySpecTo.
+func (in *EventPolicySpecTo) DeepCopy() *EventPolicySpecTo {
+ if in == nil {
+ return nil
+ }
+ out := new(EventPolicySpecTo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventPolicyStatus) DeepCopyInto(out *EventPolicyStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventPolicyStatus.
+func (in *EventPolicyStatus) DeepCopy() *EventPolicyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EventPolicyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventPolicyToReference) DeepCopyInto(out *EventPolicyToReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventPolicyToReference.
+func (in *EventPolicyToReference) DeepCopy() *EventPolicyToReference {
+ if in == nil {
+ return nil
+ }
+ out := new(EventPolicyToReference)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go
index 169fc4b748c..0ca8d7350b7 100644
--- a/pkg/client/clientset/versioned/clientset.go
+++ b/pkg/client/clientset/versioned/clientset.go
@@ -26,6 +26,7 @@ import (
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
eventingv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1"
eventingv1beta1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1"
eventingv1beta2 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2"
eventingv1beta3 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3"
@@ -38,6 +39,7 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
+ EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface
EventingV1beta1() eventingv1beta1.EventingV1beta1Interface
EventingV1beta2() eventingv1beta2.EventingV1beta2Interface
EventingV1beta3() eventingv1beta3.EventingV1beta3Interface
@@ -52,15 +54,21 @@ type Interface interface {
// Clientset contains the clients for groups.
type Clientset struct {
*discovery.DiscoveryClient
- eventingV1beta1 *eventingv1beta1.EventingV1beta1Client
- eventingV1beta2 *eventingv1beta2.EventingV1beta2Client
- eventingV1beta3 *eventingv1beta3.EventingV1beta3Client
- eventingV1 *eventingv1.EventingV1Client
- flowsV1 *flowsv1.FlowsV1Client
- messagingV1 *messagingv1.MessagingV1Client
- sinksV1alpha1 *sinksv1alpha1.SinksV1alpha1Client
- sourcesV1beta2 *sourcesv1beta2.SourcesV1beta2Client
- sourcesV1 *sourcesv1.SourcesV1Client
+ eventingV1alpha1 *eventingv1alpha1.EventingV1alpha1Client
+ eventingV1beta1 *eventingv1beta1.EventingV1beta1Client
+ eventingV1beta2 *eventingv1beta2.EventingV1beta2Client
+ eventingV1beta3 *eventingv1beta3.EventingV1beta3Client
+ eventingV1 *eventingv1.EventingV1Client
+ flowsV1 *flowsv1.FlowsV1Client
+ messagingV1 *messagingv1.MessagingV1Client
+ sinksV1alpha1 *sinksv1alpha1.SinksV1alpha1Client
+ sourcesV1beta2 *sourcesv1beta2.SourcesV1beta2Client
+ sourcesV1 *sourcesv1.SourcesV1Client
+}
+
+// EventingV1alpha1 retrieves the EventingV1alpha1Client
+func (c *Clientset) EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface {
+ return c.eventingV1alpha1
}
// EventingV1beta1 retrieves the EventingV1beta1Client
@@ -152,6 +160,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
var cs Clientset
var err error
+ cs.eventingV1alpha1, err = eventingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
cs.eventingV1beta1, err = eventingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
@@ -209,6 +221,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
+ cs.eventingV1alpha1 = eventingv1alpha1.New(c)
cs.eventingV1beta1 = eventingv1beta1.New(c)
cs.eventingV1beta2 = eventingv1beta2.New(c)
cs.eventingV1beta3 = eventingv1beta3.New(c)
diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go
index 8c17bc626b0..ce9c3649485 100644
--- a/pkg/client/clientset/versioned/fake/clientset_generated.go
+++ b/pkg/client/clientset/versioned/fake/clientset_generated.go
@@ -27,6 +27,8 @@ import (
clientset "knative.dev/eventing/pkg/client/clientset/versioned"
eventingv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1"
fakeeventingv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/fake"
+ eventingv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1"
+ fakeeventingv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake"
eventingv1beta1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1"
fakeeventingv1beta1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/fake"
eventingv1beta2 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2"
@@ -95,6 +97,11 @@ var (
_ testing.FakeClient = &Clientset{}
)
+// EventingV1alpha1 retrieves the EventingV1alpha1Client
+func (c *Clientset) EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface {
+ return &fakeeventingv1alpha1.FakeEventingV1alpha1{Fake: &c.Fake}
+}
+
// EventingV1beta1 retrieves the EventingV1beta1Client
func (c *Clientset) EventingV1beta1() eventingv1beta1.EventingV1beta1Interface {
return &fakeeventingv1beta1.FakeEventingV1beta1{Fake: &c.Fake}
diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go
index fdd99cc3855..69946bd6e54 100644
--- a/pkg/client/clientset/versioned/fake/register.go
+++ b/pkg/client/clientset/versioned/fake/register.go
@@ -25,6 +25,7 @@ import (
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
eventingv1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
eventingv1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
eventingv1beta3 "knative.dev/eventing/pkg/apis/eventing/v1beta3"
@@ -39,6 +40,7 @@ var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
+ eventingv1alpha1.AddToScheme,
eventingv1beta1.AddToScheme,
eventingv1beta2.AddToScheme,
eventingv1beta3.AddToScheme,
diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go
index 447af7253c5..e037c6c1749 100644
--- a/pkg/client/clientset/versioned/scheme/register.go
+++ b/pkg/client/clientset/versioned/scheme/register.go
@@ -25,6 +25,7 @@ import (
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
eventingv1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
eventingv1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
eventingv1beta3 "knative.dev/eventing/pkg/apis/eventing/v1beta3"
@@ -39,6 +40,7 @@ var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
+ eventingv1alpha1.AddToScheme,
eventingv1beta1.AddToScheme,
eventingv1beta2.AddToScheme,
eventingv1beta3.AddToScheme,
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go
new file mode 100644
index 00000000000..0b13fd8e001
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
new file mode 100644
index 00000000000..e901caf43a4
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type EventingV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ EventPoliciesGetter
+}
+
+// EventingV1alpha1Client is used to interact with features provided by the eventing.knative.dev group.
+type EventingV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *EventingV1alpha1Client) EventPolicies(namespace string) EventPolicyInterface {
+ return newEventPolicies(c, namespace)
+}
+
+// NewForConfig creates a new EventingV1alpha1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*EventingV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new EventingV1alpha1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventingV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &EventingV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new EventingV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *EventingV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new EventingV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *EventingV1alpha1Client {
+ return &EventingV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *EventingV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventpolicy.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventpolicy.go
new file mode 100644
index 00000000000..bb510ab6eee
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventpolicy.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// EventPoliciesGetter has a method to return a EventPolicyInterface.
+// A group's client should implement this interface.
+type EventPoliciesGetter interface {
+ EventPolicies(namespace string) EventPolicyInterface
+}
+
+// EventPolicyInterface has methods to work with EventPolicy resources.
+type EventPolicyInterface interface {
+ Create(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.CreateOptions) (*v1alpha1.EventPolicy, error)
+ Update(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.UpdateOptions) (*v1alpha1.EventPolicy, error)
+ UpdateStatus(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.UpdateOptions) (*v1alpha1.EventPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.EventPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.EventPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EventPolicy, err error)
+ EventPolicyExpansion
+}
+
+// eventPolicies implements EventPolicyInterface
+type eventPolicies struct {
+ client rest.Interface
+ ns string
+}
+
+// newEventPolicies returns a EventPolicies
+func newEventPolicies(c *EventingV1alpha1Client, namespace string) *eventPolicies {
+ return &eventPolicies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the eventPolicy, and returns the corresponding eventPolicy object, and an error if there is any.
+func (c *eventPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.EventPolicy, err error) {
+ result = &v1alpha1.EventPolicy{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of EventPolicies that match those selectors.
+func (c *eventPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.EventPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.EventPolicyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested eventPolicies.
+func (c *eventPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a eventPolicy and creates it. Returns the server's representation of the eventPolicy, and an error, if there is any.
+func (c *eventPolicies) Create(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.CreateOptions) (result *v1alpha1.EventPolicy, err error) {
+ result = &v1alpha1.EventPolicy{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a eventPolicy and updates it. Returns the server's representation of the eventPolicy, and an error, if there is any.
+func (c *eventPolicies) Update(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.UpdateOptions) (result *v1alpha1.EventPolicy, err error) {
+ result = &v1alpha1.EventPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ Name(eventPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *eventPolicies) UpdateStatus(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.UpdateOptions) (result *v1alpha1.EventPolicy, err error) {
+ result = &v1alpha1.EventPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ Name(eventPolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the eventPolicy and deletes it. Returns an error if one occurs.
+func (c *eventPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *eventPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched eventPolicy.
+func (c *eventPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EventPolicy, err error) {
+ result = &v1alpha1.EventPolicy{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/doc.go
new file mode 100644
index 00000000000..40528db3a52
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go
new file mode 100644
index 00000000000..958cd866140
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+ v1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1"
+)
+
+type FakeEventingV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeEventingV1alpha1) EventPolicies(namespace string) v1alpha1.EventPolicyInterface {
+ return &FakeEventPolicies{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeEventingV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventpolicy.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventpolicy.go
new file mode 100644
index 00000000000..a31bc820038
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventpolicy.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+ v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+)
+
+// FakeEventPolicies implements EventPolicyInterface
+type FakeEventPolicies struct {
+ Fake *FakeEventingV1alpha1
+ ns string
+}
+
+var eventpoliciesResource = v1alpha1.SchemeGroupVersion.WithResource("eventpolicies")
+
+var eventpoliciesKind = v1alpha1.SchemeGroupVersion.WithKind("EventPolicy")
+
+// Get takes name of the eventPolicy, and returns the corresponding eventPolicy object, and an error if there is any.
+func (c *FakeEventPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.EventPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(eventpoliciesResource, c.ns, name), &v1alpha1.EventPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of EventPolicies that match those selectors.
+func (c *FakeEventPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.EventPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(eventpoliciesResource, eventpoliciesKind, c.ns, opts), &v1alpha1.EventPolicyList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.EventPolicyList{ListMeta: obj.(*v1alpha1.EventPolicyList).ListMeta}
+ for _, item := range obj.(*v1alpha1.EventPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested eventPolicies.
+func (c *FakeEventPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(eventpoliciesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a eventPolicy and creates it. Returns the server's representation of the eventPolicy, and an error, if there is any.
+func (c *FakeEventPolicies) Create(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.CreateOptions) (result *v1alpha1.EventPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(eventpoliciesResource, c.ns, eventPolicy), &v1alpha1.EventPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventPolicy), err
+}
+
+// Update takes the representation of a eventPolicy and updates it. Returns the server's representation of the eventPolicy, and an error, if there is any.
+func (c *FakeEventPolicies) Update(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.UpdateOptions) (result *v1alpha1.EventPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(eventpoliciesResource, c.ns, eventPolicy), &v1alpha1.EventPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventPolicy), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeEventPolicies) UpdateStatus(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.UpdateOptions) (*v1alpha1.EventPolicy, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(eventpoliciesResource, "status", c.ns, eventPolicy), &v1alpha1.EventPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventPolicy), err
+}
+
+// Delete takes name of the eventPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeEventPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(eventpoliciesResource, c.ns, name, opts), &v1alpha1.EventPolicy{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeEventPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(eventpoliciesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.EventPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched eventPolicy.
+func (c *FakeEventPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EventPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(eventpoliciesResource, c.ns, name, pt, data, subresources...), &v1alpha1.EventPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventPolicy), err
+}
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
new file mode 100644
index 00000000000..d5bd1a045d9
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type EventPolicyExpansion interface{}
diff --git a/pkg/client/informers/externalversions/eventing/interface.go b/pkg/client/informers/externalversions/eventing/interface.go
index e77e6ae25d8..ccfc67a42aa 100644
--- a/pkg/client/informers/externalversions/eventing/interface.go
+++ b/pkg/client/informers/externalversions/eventing/interface.go
@@ -20,6 +20,7 @@ package eventing
import (
v1 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1"
+ v1alpha1 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
v1beta1 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta1"
v1beta2 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta2"
v1beta3 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3"
@@ -28,6 +29,8 @@ import (
// Interface provides access to each of this group's versions.
type Interface interface {
+ // V1alpha1 provides access to shared informers for resources in V1alpha1.
+ V1alpha1() v1alpha1.Interface
// V1beta1 provides access to shared informers for resources in V1beta1.
V1beta1() v1beta1.Interface
// V1beta2 provides access to shared informers for resources in V1beta2.
@@ -49,6 +52,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
+// V1alpha1 returns a new v1alpha1.Interface.
+func (g *group) V1alpha1() v1alpha1.Interface {
+ return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
+}
+
// V1beta1 returns a new v1beta1.Interface.
func (g *group) V1beta1() v1beta1.Interface {
return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
diff --git a/pkg/client/informers/externalversions/eventing/v1alpha1/eventpolicy.go b/pkg/client/informers/externalversions/eventing/v1alpha1/eventpolicy.go
new file mode 100644
index 00000000000..df3b47e2677
--- /dev/null
+++ b/pkg/client/informers/externalversions/eventing/v1alpha1/eventpolicy.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
+)
+
+// EventPolicyInformer provides access to a shared informer and lister for
+// EventPolicies.
+type EventPolicyInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.EventPolicyLister
+}
+
+type eventPolicyInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewEventPolicyInformer constructs a new informer for EventPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewEventPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredEventPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredEventPolicyInformer constructs a new informer for EventPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredEventPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1alpha1().EventPolicies(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1alpha1().EventPolicies(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &eventingv1alpha1.EventPolicy{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *eventPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredEventPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *eventPolicyInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&eventingv1alpha1.EventPolicy{}, f.defaultInformer)
+}
+
+func (f *eventPolicyInformer) Lister() v1alpha1.EventPolicyLister {
+ return v1alpha1.NewEventPolicyLister(f.Informer().GetIndexer())
+}
diff --git a/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go b/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
new file mode 100644
index 00000000000..89263c25853
--- /dev/null
+++ b/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // EventPolicies returns a EventPolicyInformer.
+ EventPolicies() EventPolicyInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// EventPolicies returns a EventPolicyInformer.
+func (v *version) EventPolicies() EventPolicyInformer {
+ return &eventPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go
index 3e039fa365a..41c490e06ce 100644
--- a/pkg/client/informers/externalversions/generic.go
+++ b/pkg/client/informers/externalversions/generic.go
@@ -24,12 +24,13 @@ import (
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
v1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
v1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
v1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
v1beta3 "knative.dev/eventing/pkg/apis/eventing/v1beta3"
flowsv1 "knative.dev/eventing/pkg/apis/flows/v1"
messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
- v1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
+ sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
sourcesv1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2"
)
@@ -66,6 +67,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
case v1.SchemeGroupVersion.WithResource("triggers"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1().Triggers().Informer()}, nil
+ // Group=eventing.knative.dev, Version=v1alpha1
+ case v1alpha1.SchemeGroupVersion.WithResource("eventpolicies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().EventPolicies().Informer()}, nil
+
// Group=eventing.knative.dev, Version=v1beta1
case v1beta1.SchemeGroupVersion.WithResource("eventtypes"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1beta1().EventTypes().Informer()}, nil
@@ -93,7 +98,7 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1().Subscriptions().Informer()}, nil
// Group=sinks.knative.dev, Version=v1alpha1
- case v1alpha1.SchemeGroupVersion.WithResource("jobsinks"):
+ case sinksv1alpha1.SchemeGroupVersion.WithResource("jobsinks"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Sinks().V1alpha1().JobSinks().Informer()}, nil
// Group=sources.knative.dev, Version=v1
diff --git a/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/eventpolicy.go b/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/eventpolicy.go
new file mode 100644
index 00000000000..c6da95f0a05
--- /dev/null
+++ b/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/eventpolicy.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package eventpolicy
+
+import (
+ context "context"
+
+ v1alpha1 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
+ factory "knative.dev/eventing/pkg/client/injection/informers/factory"
+ controller "knative.dev/pkg/controller"
+ injection "knative.dev/pkg/injection"
+ logging "knative.dev/pkg/logging"
+)
+
+func init() {
+ injection.Default.RegisterInformer(withInformer)
+}
+
+// Key is used for associating the Informer inside the context.Context.
+type Key struct{}
+
+func withInformer(ctx context.Context) (context.Context, controller.Informer) {
+ f := factory.Get(ctx)
+ inf := f.Eventing().V1alpha1().EventPolicies()
+ return context.WithValue(ctx, Key{}, inf), inf.Informer()
+}
+
+// Get extracts the typed informer from the context.
+func Get(ctx context.Context) v1alpha1.EventPolicyInformer {
+ untyped := ctx.Value(Key{})
+ if untyped == nil {
+ logging.FromContext(ctx).Panic(
+ "Unable to fetch knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1.EventPolicyInformer from context.")
+ }
+ return untyped.(v1alpha1.EventPolicyInformer)
+}
diff --git a/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/fake/fake.go b/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/fake/fake.go
new file mode 100644
index 00000000000..349893d97b7
--- /dev/null
+++ b/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/fake/fake.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ context "context"
+
+ eventpolicy "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy"
+ fake "knative.dev/eventing/pkg/client/injection/informers/factory/fake"
+ controller "knative.dev/pkg/controller"
+ injection "knative.dev/pkg/injection"
+)
+
+var Get = eventpolicy.Get
+
+func init() {
+ injection.Fake.RegisterInformer(withInformer)
+}
+
+func withInformer(ctx context.Context) (context.Context, controller.Informer) {
+ f := fake.Get(ctx)
+ inf := f.Eventing().V1alpha1().EventPolicies()
+ return context.WithValue(ctx, eventpolicy.Key{}, inf), inf.Informer()
+}
diff --git a/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/filtered/eventpolicy.go b/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/filtered/eventpolicy.go
new file mode 100644
index 00000000000..11a83b51a8c
--- /dev/null
+++ b/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/filtered/eventpolicy.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package filtered
+
+import (
+ context "context"
+
+ v1alpha1 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
+ filtered "knative.dev/eventing/pkg/client/injection/informers/factory/filtered"
+ controller "knative.dev/pkg/controller"
+ injection "knative.dev/pkg/injection"
+ logging "knative.dev/pkg/logging"
+)
+
+func init() {
+ injection.Default.RegisterFilteredInformers(withInformer)
+}
+
+// Key is used for associating the Informer inside the context.Context.
+type Key struct {
+ Selector string
+}
+
+func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
+ untyped := ctx.Value(filtered.LabelKey{})
+ if untyped == nil {
+ logging.FromContext(ctx).Panic(
+ "Unable to fetch labelkey from context.")
+ }
+ labelSelectors := untyped.([]string)
+ infs := []controller.Informer{}
+ for _, selector := range labelSelectors {
+ f := filtered.Get(ctx, selector)
+ inf := f.Eventing().V1alpha1().EventPolicies()
+ ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
+ infs = append(infs, inf.Informer())
+ }
+ return ctx, infs
+}
+
+// Get extracts the typed informer from the context.
+func Get(ctx context.Context, selector string) v1alpha1.EventPolicyInformer {
+ untyped := ctx.Value(Key{Selector: selector})
+ if untyped == nil {
+ logging.FromContext(ctx).Panicf(
+ "Unable to fetch knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1.EventPolicyInformer with selector %s from context.", selector)
+ }
+ return untyped.(v1alpha1.EventPolicyInformer)
+}
diff --git a/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/filtered/fake/fake.go b/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/filtered/fake/fake.go
new file mode 100644
index 00000000000..68c9d13a5a9
--- /dev/null
+++ b/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/filtered/fake/fake.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ context "context"
+
+ filtered "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/filtered"
+ factoryfiltered "knative.dev/eventing/pkg/client/injection/informers/factory/filtered"
+ controller "knative.dev/pkg/controller"
+ injection "knative.dev/pkg/injection"
+ logging "knative.dev/pkg/logging"
+)
+
+var Get = filtered.Get
+
+func init() {
+ injection.Fake.RegisterFilteredInformers(withInformer)
+}
+
+func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
+ untyped := ctx.Value(factoryfiltered.LabelKey{})
+ if untyped == nil {
+ logging.FromContext(ctx).Panic(
+ "Unable to fetch labelkey from context.")
+ }
+ labelSelectors := untyped.([]string)
+ infs := []controller.Informer{}
+ for _, selector := range labelSelectors {
+ f := factoryfiltered.Get(ctx, selector)
+ inf := f.Eventing().V1alpha1().EventPolicies()
+ ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
+ infs = append(infs, inf.Informer())
+ }
+ return ctx, infs
+}
diff --git a/pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/controller.go b/pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/controller.go
new file mode 100644
index 00000000000..89163724a3e
--- /dev/null
+++ b/pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/controller.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package eventpolicy
+
+import (
+ context "context"
+ fmt "fmt"
+ reflect "reflect"
+ strings "strings"
+
+ zap "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+ v1 "k8s.io/client-go/kubernetes/typed/core/v1"
+ record "k8s.io/client-go/tools/record"
+ versionedscheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+ client "knative.dev/eventing/pkg/client/injection/client"
+ eventpolicy "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy"
+ kubeclient "knative.dev/pkg/client/injection/kube/client"
+ controller "knative.dev/pkg/controller"
+ logging "knative.dev/pkg/logging"
+ logkey "knative.dev/pkg/logging/logkey"
+ reconciler "knative.dev/pkg/reconciler"
+)
+
+const (
+ defaultControllerAgentName = "eventpolicy-controller"
+ defaultFinalizerName = "eventpolicies.eventing.knative.dev"
+)
+
+// NewImpl returns a controller.Impl that handles queuing and feeding work from
+// the queue through an implementation of controller.Reconciler, delegating to
+// the provided Interface and optional Finalizer methods. OptionsFn is used to return
+// controller.ControllerOptions to be used by the internal reconciler.
+func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
+ logger := logging.FromContext(ctx)
+
+ // Check the options function input. It should be 0 or 1.
+ if len(optionsFns) > 1 {
+ logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
+ }
+
+ eventpolicyInformer := eventpolicy.Get(ctx)
+
+ lister := eventpolicyInformer.Lister()
+
+ var promoteFilterFunc func(obj interface{}) bool
+ var promoteFunc = func(bkt reconciler.Bucket) {}
+
+ rec := &reconcilerImpl{
+ LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
+ PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
+
+ // Signal promotion event
+ promoteFunc(bkt)
+
+ all, err := lister.List(labels.Everything())
+ if err != nil {
+ return err
+ }
+ for _, elt := range all {
+ if promoteFilterFunc != nil {
+ if ok := promoteFilterFunc(elt); !ok {
+ continue
+ }
+ }
+ enq(bkt, types.NamespacedName{
+ Namespace: elt.GetNamespace(),
+ Name: elt.GetName(),
+ })
+ }
+ return nil
+ },
+ },
+ Client: client.Get(ctx),
+ Lister: lister,
+ reconciler: r,
+ finalizerName: defaultFinalizerName,
+ }
+
+ ctrType := reflect.TypeOf(r).Elem()
+ ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
+ ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")
+
+ logger = logger.With(
+ zap.String(logkey.ControllerType, ctrTypeName),
+ zap.String(logkey.Kind, "eventing.knative.dev.EventPolicy"),
+ )
+
+ impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
+ agentName := defaultControllerAgentName
+
+ // Pass impl to the options. Save any optional results.
+ for _, fn := range optionsFns {
+ opts := fn(impl)
+ if opts.ConfigStore != nil {
+ rec.configStore = opts.ConfigStore
+ }
+ if opts.FinalizerName != "" {
+ rec.finalizerName = opts.FinalizerName
+ }
+ if opts.AgentName != "" {
+ agentName = opts.AgentName
+ }
+ if opts.SkipStatusUpdates {
+ rec.skipStatusUpdates = true
+ }
+ if opts.DemoteFunc != nil {
+ rec.DemoteFunc = opts.DemoteFunc
+ }
+ if opts.PromoteFilterFunc != nil {
+ promoteFilterFunc = opts.PromoteFilterFunc
+ }
+ if opts.PromoteFunc != nil {
+ promoteFunc = opts.PromoteFunc
+ }
+ }
+
+ rec.Recorder = createRecorder(ctx, agentName)
+
+ return impl
+}
+
+func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
+ logger := logging.FromContext(ctx)
+
+ recorder := controller.GetEventRecorder(ctx)
+ if recorder == nil {
+ // Create event broadcaster
+ logger.Debug("Creating event broadcaster")
+ eventBroadcaster := record.NewBroadcaster()
+ watches := []watch.Interface{
+ eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
+ eventBroadcaster.StartRecordingToSink(
+ &v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
+ }
+ recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
+ go func() {
+ <-ctx.Done()
+ for _, w := range watches {
+ w.Stop()
+ }
+ }()
+ }
+
+ return recorder
+}
+
+func init() {
+ versionedscheme.AddToScheme(scheme.Scheme)
+}
diff --git a/pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/reconciler.go b/pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/reconciler.go
new file mode 100644
index 00000000000..264627eb9bd
--- /dev/null
+++ b/pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/reconciler.go
@@ -0,0 +1,440 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package eventpolicy
+
+import (
+ context "context"
+ json "encoding/json"
+ fmt "fmt"
+
+ zap "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ v1 "k8s.io/api/core/v1"
+ equality "k8s.io/apimachinery/pkg/api/equality"
+ errors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ sets "k8s.io/apimachinery/pkg/util/sets"
+ record "k8s.io/client-go/tools/record"
+ v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ eventingv1alpha1 "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
+ controller "knative.dev/pkg/controller"
+ kmp "knative.dev/pkg/kmp"
+ logging "knative.dev/pkg/logging"
+ reconciler "knative.dev/pkg/reconciler"
+)
+
+// Interface defines the strongly typed interfaces to be implemented by a
+// controller reconciling v1alpha1.EventPolicy.
+type Interface interface {
+ // ReconcileKind implements custom logic to reconcile v1alpha1.EventPolicy. Any changes
+ // to the objects .Status or .Finalizers will be propagated to the stored
+ // object. It is recommended that implementors do not call any update calls
+ // for the Kind inside of ReconcileKind, it is the responsibility of the calling
+ // controller to propagate those properties. The resource passed to ReconcileKind
+ // will always have an empty deletion timestamp.
+ ReconcileKind(ctx context.Context, o *v1alpha1.EventPolicy) reconciler.Event
+}
+
+// Finalizer defines the strongly typed interfaces to be implemented by a
+// controller finalizing v1alpha1.EventPolicy.
+type Finalizer interface {
+ // FinalizeKind implements custom logic to finalize v1alpha1.EventPolicy. Any changes
+ // to the objects .Status or .Finalizers will be ignored. Returning a nil or
+ // Normal type reconciler.Event will allow the finalizer to be deleted on
+ // the resource. The resource passed to FinalizeKind will always have a set
+ // deletion timestamp.
+ FinalizeKind(ctx context.Context, o *v1alpha1.EventPolicy) reconciler.Event
+}
+
+// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
+// controller reconciling v1alpha1.EventPolicy if they want to process resources for which
+// they are not the leader.
+type ReadOnlyInterface interface {
+ // ObserveKind implements logic to observe v1alpha1.EventPolicy.
+ // This method should not write to the API.
+ ObserveKind(ctx context.Context, o *v1alpha1.EventPolicy) reconciler.Event
+}
+
+type doReconcile func(ctx context.Context, o *v1alpha1.EventPolicy) reconciler.Event
+
+// reconcilerImpl implements controller.Reconciler for v1alpha1.EventPolicy resources.
+type reconcilerImpl struct {
+ // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
+ reconciler.LeaderAwareFuncs
+
+ // Client is used to write back status updates.
+ Client versioned.Interface
+
+ // Listers index properties about resources.
+ Lister eventingv1alpha1.EventPolicyLister
+
+ // Recorder is an event recorder for recording Event resources to the
+ // Kubernetes API.
+ Recorder record.EventRecorder
+
+ // configStore allows for decorating a context with config maps.
+ // +optional
+ configStore reconciler.ConfigStore
+
+ // reconciler is the implementation of the business logic of the resource.
+ reconciler Interface
+
+ // finalizerName is the name of the finalizer to reconcile.
+ finalizerName string
+
+ // skipStatusUpdates configures whether or not this reconciler automatically updates
+ // the status of the reconciled resource.
+ skipStatusUpdates bool
+}
+
+// Check that our Reconciler implements controller.Reconciler.
+var _ controller.Reconciler = (*reconcilerImpl)(nil)
+
+// Check that our generated Reconciler is always LeaderAware.
+var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
+
+func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister eventingv1alpha1.EventPolicyLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
+ // Check the options function input. It should be 0 or 1.
+ if len(options) > 1 {
+ logger.Fatal("Up to one options struct is supported, found: ", len(options))
+ }
+
+ // Fail fast when users inadvertently implement the other LeaderAware interface.
+ // For the typed reconcilers, Promote shouldn't take any arguments.
+ if _, ok := r.(reconciler.LeaderAware); ok {
+ logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
+ }
+
+ rec := &reconcilerImpl{
+ LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
+ PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
+ all, err := lister.List(labels.Everything())
+ if err != nil {
+ return err
+ }
+ for _, elt := range all {
+ // TODO: Consider letting users specify a filter in options.
+ enq(bkt, types.NamespacedName{
+ Namespace: elt.GetNamespace(),
+ Name: elt.GetName(),
+ })
+ }
+ return nil
+ },
+ },
+ Client: client,
+ Lister: lister,
+ Recorder: recorder,
+ reconciler: r,
+ finalizerName: defaultFinalizerName,
+ }
+
+ for _, opts := range options {
+ if opts.ConfigStore != nil {
+ rec.configStore = opts.ConfigStore
+ }
+ if opts.FinalizerName != "" {
+ rec.finalizerName = opts.FinalizerName
+ }
+ if opts.SkipStatusUpdates {
+ rec.skipStatusUpdates = true
+ }
+ if opts.DemoteFunc != nil {
+ rec.DemoteFunc = opts.DemoteFunc
+ }
+ }
+
+ return rec
+}
+
+// Reconcile implements controller.Reconciler
+func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
+ logger := logging.FromContext(ctx)
+
+ // Initialize the reconciler state. This will convert the namespace/name
+ // string into a distinct namespace and name, determine if this instance of
+ // the reconciler is the leader, and any additional interfaces implemented
+	// by the reconciler. Returns an error if the resource key is invalid.
+ s, err := newState(key, r)
+ if err != nil {
+ logger.Error("Invalid resource key: ", key)
+ return nil
+ }
+
+ // If we are not the leader, and we don't implement either ReadOnly
+ // observer interfaces, then take a fast-path out.
+ if s.isNotLeaderNorObserver() {
+ return controller.NewSkipKey(key)
+ }
+
+ // If configStore is set, attach the frozen configuration to the context.
+ if r.configStore != nil {
+ ctx = r.configStore.ToContext(ctx)
+ }
+
+ // Add the recorder to context.
+ ctx = controller.WithEventRecorder(ctx, r.Recorder)
+
+ // Get the resource with this namespace/name.
+
+ getter := r.Lister.EventPolicies(s.namespace)
+
+ original, err := getter.Get(s.name)
+
+ if errors.IsNotFound(err) {
+ // The resource may no longer exist, in which case we stop processing and call
+ // the ObserveDeletion handler if appropriate.
+ logger.Debugf("Resource %q no longer exists", key)
+ if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
+ return del.ObserveDeletion(ctx, types.NamespacedName{
+ Namespace: s.namespace,
+ Name: s.name,
+ })
+ }
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ // Don't modify the informers copy.
+ resource := original.DeepCopy()
+
+ var reconcileEvent reconciler.Event
+
+ name, do := s.reconcileMethodFor(resource)
+ // Append the target method to the logger.
+ logger = logger.With(zap.String("targetMethod", name))
+ switch name {
+ case reconciler.DoReconcileKind:
+ // Set and update the finalizer on resource if r.reconciler
+ // implements Finalizer.
+ if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
+ return fmt.Errorf("failed to set finalizers: %w", err)
+ }
+
+ if !r.skipStatusUpdates {
+ reconciler.PreProcessReconcile(ctx, resource)
+ }
+
+ // Reconcile this copy of the resource and then write back any status
+ // updates regardless of whether the reconciliation errored out.
+ reconcileEvent = do(ctx, resource)
+
+ if !r.skipStatusUpdates {
+ reconciler.PostProcessReconcile(ctx, resource, original)
+ }
+
+ case reconciler.DoFinalizeKind:
+ // For finalizing reconcilers, if this resource being marked for deletion
+ // and reconciled cleanly (nil or normal event), remove the finalizer.
+ reconcileEvent = do(ctx, resource)
+
+ if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
+ return fmt.Errorf("failed to clear finalizers: %w", err)
+ }
+
+ case reconciler.DoObserveKind:
+ // Observe any changes to this resource, since we are not the leader.
+ reconcileEvent = do(ctx, resource)
+
+ }
+
+ // Synchronize the status.
+ switch {
+ case r.skipStatusUpdates:
+ // This reconciler implementation is configured to skip resource updates.
+ // This may mean this reconciler does not observe spec, but reconciles external changes.
+ case equality.Semantic.DeepEqual(original.Status, resource.Status):
+ // If we didn't change anything then don't call updateStatus.
+ // This is important because the copy we loaded from the injectionInformer's
+ // cache may be stale and we don't want to overwrite a prior update
+ // to status with this stale state.
+ case !s.isLeader:
+ // High-availability reconcilers may have many replicas watching the resource, but only
+ // the elected leader is expected to write modifications.
+ logger.Warn("Saw status changes when we aren't the leader!")
+ default:
+ if err = r.updateStatus(ctx, logger, original, resource); err != nil {
+ logger.Warnw("Failed to update resource status", zap.Error(err))
+ r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed",
+ "Failed to update status for %q: %v", resource.Name, err)
+ return err
+ }
+ }
+
+ // Report the reconciler event, if any.
+ if reconcileEvent != nil {
+ var event *reconciler.ReconcilerEvent
+ if reconciler.EventAs(reconcileEvent, &event) {
+ logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
+ r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())
+
+ // the event was wrapped inside an error, consider the reconciliation as failed
+ if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
+ return reconcileEvent
+ }
+ return nil
+ }
+
+ if controller.IsSkipKey(reconcileEvent) {
+ // This is a wrapped error, don't emit an event.
+ } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
+ // This is a wrapped error, don't emit an event.
+ } else {
+ logger.Errorw("Returned an error", zap.Error(reconcileEvent))
+ r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
+ }
+ return reconcileEvent
+ }
+
+ return nil
+}
+
+func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1alpha1.EventPolicy, desired *v1alpha1.EventPolicy) error {
+ existing = existing.DeepCopy()
+ return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
+ // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
+ if attempts > 0 {
+
+ getter := r.Client.EventingV1alpha1().EventPolicies(desired.Namespace)
+
+ existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{})
+ if err != nil {
+ return err
+ }
+ }
+
+ // If there's nothing to update, just return.
+ if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
+ return nil
+ }
+
+ if logger.Desugar().Core().Enabled(zapcore.DebugLevel) {
+ if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
+ logger.Debug("Updating status with: ", diff)
+ }
+ }
+
+ existing.Status = desired.Status
+
+ updater := r.Client.EventingV1alpha1().EventPolicies(existing.Namespace)
+
+ _, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{})
+ return err
+ })
+}
+
+// updateFinalizersFiltered will update the Finalizers of the resource.
+// TODO: this method could be generic and sync all finalizers. For now it only
+// updates defaultFinalizerName or its override.
+func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.EventPolicy, desiredFinalizers sets.Set[string]) (*v1alpha1.EventPolicy, error) {
+ // Don't modify the informers copy.
+ existing := resource.DeepCopy()
+
+ var finalizers []string
+
+ // If there's nothing to update, just return.
+ existingFinalizers := sets.New[string](existing.Finalizers...)
+
+ if desiredFinalizers.Has(r.finalizerName) {
+ if existingFinalizers.Has(r.finalizerName) {
+ // Nothing to do.
+ return resource, nil
+ }
+ // Add the finalizer.
+ finalizers = append(existing.Finalizers, r.finalizerName)
+ } else {
+ if !existingFinalizers.Has(r.finalizerName) {
+ // Nothing to do.
+ return resource, nil
+ }
+ // Remove the finalizer.
+ existingFinalizers.Delete(r.finalizerName)
+ finalizers = sets.List(existingFinalizers)
+ }
+
+ mergePatch := map[string]interface{}{
+ "metadata": map[string]interface{}{
+ "finalizers": finalizers,
+ "resourceVersion": existing.ResourceVersion,
+ },
+ }
+
+ patch, err := json.Marshal(mergePatch)
+ if err != nil {
+ return resource, err
+ }
+
+ patcher := r.Client.EventingV1alpha1().EventPolicies(resource.Namespace)
+
+ resourceName := resource.Name
+ updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
+ if err != nil {
+ r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
+ "Failed to update finalizers for %q: %v", resourceName, err)
+ } else {
+ r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
+ "Updated %q finalizers", resource.GetName())
+ }
+ return updated, err
+}
+
+func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1alpha1.EventPolicy) (*v1alpha1.EventPolicy, error) {
+ if _, ok := r.reconciler.(Finalizer); !ok {
+ return resource, nil
+ }
+
+ finalizers := sets.New[string](resource.Finalizers...)
+
+ // If this resource is not being deleted, mark the finalizer.
+ if resource.GetDeletionTimestamp().IsZero() {
+ finalizers.Insert(r.finalizerName)
+ }
+
+ // Synchronize the finalizers filtered by r.finalizerName.
+ return r.updateFinalizersFiltered(ctx, resource, finalizers)
+}
+
+func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1.EventPolicy, reconcileEvent reconciler.Event) (*v1alpha1.EventPolicy, error) {
+ if _, ok := r.reconciler.(Finalizer); !ok {
+ return resource, nil
+ }
+ if resource.GetDeletionTimestamp().IsZero() {
+ return resource, nil
+ }
+
+ finalizers := sets.New[string](resource.Finalizers...)
+
+ if reconcileEvent != nil {
+ var event *reconciler.ReconcilerEvent
+ if reconciler.EventAs(reconcileEvent, &event) {
+ if event.EventType == v1.EventTypeNormal {
+ finalizers.Delete(r.finalizerName)
+ }
+ }
+ } else {
+ finalizers.Delete(r.finalizerName)
+ }
+
+ // Synchronize the finalizers filtered by r.finalizerName.
+ return r.updateFinalizersFiltered(ctx, resource, finalizers)
+}
diff --git a/pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/state.go b/pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/state.go
new file mode 100644
index 00000000000..e9f265c0206
--- /dev/null
+++ b/pkg/client/injection/reconciler/eventing/v1alpha1/eventpolicy/state.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package eventpolicy
+
+import (
+ fmt "fmt"
+
+ types "k8s.io/apimachinery/pkg/types"
+ cache "k8s.io/client-go/tools/cache"
+ v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ reconciler "knative.dev/pkg/reconciler"
+)
+
+// state is used to track the state of a reconciler in a single run.
+type state struct {
+ // key is the original reconciliation key from the queue.
+ key string
+ // namespace is the namespace split from the reconciliation key.
+ namespace string
+ // name is the name split from the reconciliation key.
+ name string
+ // reconciler is the reconciler.
+ reconciler Interface
+ // roi is the read only interface cast of the reconciler.
+ roi ReadOnlyInterface
+ // isROI (Read Only Interface) the reconciler only observes reconciliation.
+ isROI bool
+ // isLeader the instance of the reconciler is the elected leader.
+ isLeader bool
+}
+
+func newState(key string, r *reconcilerImpl) (*state, error) {
+ // Convert the namespace/name string into a distinct namespace and name.
+ namespace, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ return nil, fmt.Errorf("invalid resource key: %s", key)
+ }
+
+ roi, isROI := r.reconciler.(ReadOnlyInterface)
+
+ isLeader := r.IsLeaderFor(types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ })
+
+ return &state{
+ key: key,
+ namespace: namespace,
+ name: name,
+ reconciler: r.reconciler,
+ roi: roi,
+ isROI: isROI,
+ isLeader: isLeader,
+ }, nil
+}
+
+// isNotLeaderNorObserver checks to see if this reconciler with the current
+// state is enabled to do any work or not.
+// isNotLeaderNorObserver returns true when there is no work possible for the
+// reconciler.
+func (s *state) isNotLeaderNorObserver() bool {
+ if !s.isLeader && !s.isROI {
+ // If we are not the leader, and we don't implement the ReadOnly
+ // interface, then take a fast-path out.
+ return true
+ }
+ return false
+}
+
+func (s *state) reconcileMethodFor(o *v1alpha1.EventPolicy) (string, doReconcile) {
+ if o.GetDeletionTimestamp().IsZero() {
+ if s.isLeader {
+ return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
+ } else if s.isROI {
+ return reconciler.DoObserveKind, s.roi.ObserveKind
+ }
+ } else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
+ return reconciler.DoFinalizeKind, fin.FinalizeKind
+ }
+ return "unknown", nil
+}
diff --git a/pkg/client/listers/eventing/v1alpha1/eventpolicy.go b/pkg/client/listers/eventing/v1alpha1/eventpolicy.go
new file mode 100644
index 00000000000..4601f8069e6
--- /dev/null
+++ b/pkg/client/listers/eventing/v1alpha1/eventpolicy.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+)
+
+// EventPolicyLister helps list EventPolicies.
+// All objects returned here must be treated as read-only.
+type EventPolicyLister interface {
+ // List lists all EventPolicies in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.EventPolicy, err error)
+ // EventPolicies returns an object that can list and get EventPolicies.
+ EventPolicies(namespace string) EventPolicyNamespaceLister
+ EventPolicyListerExpansion
+}
+
+// eventPolicyLister implements the EventPolicyLister interface.
+type eventPolicyLister struct {
+ indexer cache.Indexer
+}
+
+// NewEventPolicyLister returns a new EventPolicyLister.
+func NewEventPolicyLister(indexer cache.Indexer) EventPolicyLister {
+ return &eventPolicyLister{indexer: indexer}
+}
+
+// List lists all EventPolicies in the indexer.
+func (s *eventPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.EventPolicy, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.EventPolicy))
+ })
+ return ret, err
+}
+
+// EventPolicies returns an object that can list and get EventPolicies.
+func (s *eventPolicyLister) EventPolicies(namespace string) EventPolicyNamespaceLister {
+ return eventPolicyNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// EventPolicyNamespaceLister helps list and get EventPolicies.
+// All objects returned here must be treated as read-only.
+type EventPolicyNamespaceLister interface {
+ // List lists all EventPolicies in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.EventPolicy, err error)
+ // Get retrieves the EventPolicy from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.EventPolicy, error)
+ EventPolicyNamespaceListerExpansion
+}
+
+// eventPolicyNamespaceLister implements the EventPolicyNamespaceLister
+// interface.
+type eventPolicyNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all EventPolicies in the indexer for a given namespace.
+func (s eventPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.EventPolicy, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.EventPolicy))
+ })
+ return ret, err
+}
+
+// Get retrieves the EventPolicy from the indexer for a given namespace and name.
+func (s eventPolicyNamespaceLister) Get(name string) (*v1alpha1.EventPolicy, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("eventpolicy"), name)
+ }
+ return obj.(*v1alpha1.EventPolicy), nil
+}
diff --git a/pkg/client/listers/eventing/v1alpha1/expansion_generated.go b/pkg/client/listers/eventing/v1alpha1/expansion_generated.go
new file mode 100644
index 00000000000..e3f601930d7
--- /dev/null
+++ b/pkg/client/listers/eventing/v1alpha1/expansion_generated.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// EventPolicyListerExpansion allows custom methods to be added to
+// EventPolicyLister.
+type EventPolicyListerExpansion interface{}
+
+// EventPolicyNamespaceListerExpansion allows custom methods to be added to
+// EventPolicyNamespaceLister.
+type EventPolicyNamespaceListerExpansion interface{}
From 8da4543f0d1e6873da10f62f0289ed4f2e55fde1 Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Wed, 12 Jun 2024 18:04:48 +0200
Subject: [PATCH 11/33] Add `default-authorization-mode` feature flag (#7996)
Add default-authorization-mode feature flag
Signed-off-by: Pierangelo Di Pilato
---
config/core/configmaps/features.yaml | 6 ++
pkg/apis/feature/features.go | 59 +++++++++++++++----
pkg/apis/feature/features_test.go | 1 +
pkg/apis/feature/flag_names.go | 1 +
.../feature/testdata/config-features.yaml | 1 +
5 files changed, 57 insertions(+), 11 deletions(-)
diff --git a/config/core/configmaps/features.yaml b/config/core/configmaps/features.yaml
index fe1ed2aa5f1..9dbec1753ef 100644
--- a/config/core/configmaps/features.yaml
+++ b/config/core/configmaps/features.yaml
@@ -56,6 +56,12 @@ data:
# For more details: https://github.com/knative/eventing/issues/7174
authentication-oidc: "disabled"
+ # ALPHA feature: The default-authorization-mode flag allows you to change the default
+ # authorization mode for resources that have no EventPolicy associated with them.
+ #
+ # This feature flag is only used when "authentication-oidc" is enabled.
+ default-authorization-mode: "allow-same-namespace"
+
# ALPHA feature: The cross-namespace-event-links flag allows you to use cross-namespace referencing for Eventing.
# For more details: https://github.com/knative/eventing/issues/7739
cross-namespace-event-links: "disabled"
diff --git a/pkg/apis/feature/features.go b/pkg/apis/feature/features.go
index e01195bb5f6..982ca8c67e0 100644
--- a/pkg/apis/feature/features.go
+++ b/pkg/apis/feature/features.go
@@ -45,6 +45,24 @@ const (
// - Addressables should advertise both HTTP and HTTPS endpoints
// - Producers should prefer to send events to HTTPS endpoints, if available
Permissive Flag = "Permissive"
+
+ // AuthorizationAllowAll is a value for AuthorizationDefaultMode that indicates to allow all
+ // OIDC subjects by default.
+ // This configuration is applied when there is no EventPolicy with a "to" referencing a given
+ // resource.
+ AuthorizationAllowAll Flag = "Allow-All"
+
+ // AuthorizationDenyAll is a value for AuthorizationDefaultMode that indicates to deny all
+ // OIDC subjects by default.
+ // This configuration is applied when there is no EventPolicy with a "to" referencing a given
+ // resource.
+ AuthorizationDenyAll Flag = "Deny-All"
+
+ // AuthorizationAllowSameNamespace is a value for AuthorizationDefaultMode that indicates to allow
+ // OIDC subjects with the same namespace as a given resource.
+ // This configuration is applied when there is no EventPolicy with a "to" referencing a given
+ // resource.
+ AuthorizationAllowSameNamespace Flag = "Allow-Same-Namespace"
)
// Flags is a map containing all the enabled/disabled flags for the experimental features.
@@ -53,15 +71,16 @@ type Flags map[string]Flag
func newDefaults() Flags {
return map[string]Flag{
- KReferenceGroup: Disabled,
- DeliveryRetryAfter: Disabled,
- DeliveryTimeout: Enabled,
- KReferenceMapping: Disabled,
- NewTriggerFilters: Enabled,
- TransportEncryption: Disabled,
- OIDCAuthentication: Disabled,
- EvenTypeAutoCreate: Disabled,
- NewAPIServerFilters: Disabled,
+ KReferenceGroup: Disabled,
+ DeliveryRetryAfter: Disabled,
+ DeliveryTimeout: Enabled,
+ KReferenceMapping: Disabled,
+ NewTriggerFilters: Enabled,
+ TransportEncryption: Disabled,
+ OIDCAuthentication: Disabled,
+ EvenTypeAutoCreate: Disabled,
+ NewAPIServerFilters: Disabled,
+ AuthorizationDefaultMode: AuthorizationAllowSameNamespace,
}
}
@@ -103,6 +122,18 @@ func (e Flags) IsCrossNamespaceEventLinks() bool {
return e != nil && e[CrossNamespaceEventLinks] == Enabled
}
+func (e Flags) IsAuthorizationDefaultModeAllowAll() bool {
+ return e != nil && e[AuthorizationDefaultMode] == AuthorizationAllowAll
+}
+
+func (e Flags) IsAuthorizationDefaultModeDenyAll() bool {
+ return e != nil && e[AuthorizationDefaultMode] == AuthorizationDenyAll
+}
+
+func (e Flags) IsAuthorizationDefaultModeSameNamespace() bool {
+ return e != nil && e[AuthorizationDefaultMode] == AuthorizationAllowSameNamespace
+}
+
func (e Flags) String() string {
return fmt.Sprintf("%+v", map[string]Flag(e))
}
@@ -142,10 +173,16 @@ func NewFlagsConfigFromMap(data map[string]string) (Flags, error) {
flags[sanitizedKey] = Disabled
} else if strings.EqualFold(v, string(Enabled)) {
flags[sanitizedKey] = Enabled
- } else if k == TransportEncryption && strings.EqualFold(v, string(Permissive)) {
+ } else if sanitizedKey == TransportEncryption && strings.EqualFold(v, string(Permissive)) {
flags[sanitizedKey] = Permissive
- } else if k == TransportEncryption && strings.EqualFold(v, string(Strict)) {
+ } else if sanitizedKey == TransportEncryption && strings.EqualFold(v, string(Strict)) {
flags[sanitizedKey] = Strict
+ } else if sanitizedKey == AuthorizationDefaultMode && strings.EqualFold(v, string(AuthorizationAllowAll)) {
+ flags[sanitizedKey] = AuthorizationAllowAll
+ } else if sanitizedKey == AuthorizationDefaultMode && strings.EqualFold(v, string(AuthorizationDenyAll)) {
+ flags[sanitizedKey] = AuthorizationDenyAll
+ } else if sanitizedKey == AuthorizationDefaultMode && strings.EqualFold(v, string(AuthorizationAllowSameNamespace)) {
+ flags[sanitizedKey] = AuthorizationAllowSameNamespace
} else if strings.Contains(k, NodeSelectorLabel) {
flags[sanitizedKey] = Flag(v)
} else {
diff --git a/pkg/apis/feature/features_test.go b/pkg/apis/feature/features_test.go
index 9b599e5049f..c03561cc616 100644
--- a/pkg/apis/feature/features_test.go
+++ b/pkg/apis/feature/features_test.go
@@ -56,6 +56,7 @@ func TestGetFlags(t *testing.T) {
require.True(t, flags.IsAllowed("my-enabled-flag"))
require.True(t, flags.IsAllowed("my-allowed-flag"))
require.False(t, flags.IsAllowed("non-disabled-flag"))
+ require.True(t, flags.IsAuthorizationDefaultModeSameNamespace())
nodeSelector := flags.NodeSelector()
expectedNodeSelector := map[string]string{"testkey": "testvalue", "testkey1": "testvalue1", "testkey2": "testvalue2"}
diff --git a/pkg/apis/feature/flag_names.go b/pkg/apis/feature/flag_names.go
index cd937554c4b..99abc20769c 100644
--- a/pkg/apis/feature/flag_names.go
+++ b/pkg/apis/feature/flag_names.go
@@ -28,4 +28,5 @@ const (
NodeSelectorLabel = "apiserversources-nodeselector-"
CrossNamespaceEventLinks = "cross-namespace-event-links"
NewAPIServerFilters = "new-apiserversource-filters"
+ AuthorizationDefaultMode = "default-authorization-mode"
)
diff --git a/pkg/apis/feature/testdata/config-features.yaml b/pkg/apis/feature/testdata/config-features.yaml
index 108b17851eb..6c3252ba429 100644
--- a/pkg/apis/feature/testdata/config-features.yaml
+++ b/pkg/apis/feature/testdata/config-features.yaml
@@ -25,6 +25,7 @@ data:
my-enabled-flag: "enabled"
my-disabled-flag: "disabled"
my-allowed-flag: "allowed"
+ default-authorization-mode: allow-same-namespace
apiserversources-nodeselector-testkey: testvalue
apiserversources-nodeselector-testkey1: testvalue1
apiserversources-nodeselector-testkey2: testvalue2
From ea8f0fda4c0614573d4ed50e1a78fe876e6e43ca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20St=C3=A4bler?=
Date: Thu, 13 Jun 2024 11:31:07 +0200
Subject: [PATCH 12/33] Add `AppliedEventPoliciesStatus` type (#7999)
* Add AppliedEventPoliciesStatus type
* Add AppliedEventPoliciesStatus to Broker status
* Add AppliedEventPoliciesStatus to Parallel status
* Add AppliedEventPoliciesStatus to Sequence status
* Add AppliedEventPoliciesStatus to InMemoryChannel status
* Move AppliedEventPoliciesStatus to eventingduckv1
* run ./hack/update-codegen.sh
* Fix typo in filename and rename
* Mark .status.policies as optional
* Fix build issue in IMC lifecycle unit test
* Included review feedback (AppliedEventPoliciesStatusPolicy -> AppliedEventPolicyRef)
* Fix verify issue
---
.../resources/in-memory-channel.yaml | 12 ++
config/core/resources/broker.yaml | 12 ++
config/core/resources/parallel.yaml | 12 ++
config/core/resources/sequence.yaml | 12 ++
docs/eventing-api.md | 142 ++++++++++++++++++
pkg/apis/duck/v1/eventpoliciesstatus_type.go | 35 +++++
pkg/apis/duck/v1/zz_generated.deepcopy.go | 37 +++++
pkg/apis/eventing/v1/broker_types.go | 4 +
pkg/apis/eventing/v1/zz_generated.deepcopy.go | 1 +
pkg/apis/flows/v1/parallel_types.go | 5 +-
pkg/apis/flows/v1/sequence_types.go | 5 +-
pkg/apis/flows/v1/zz_generated.deepcopy.go | 2 +
.../v1/in_memory_channel_lifecycle_test.go | 4 +-
.../messaging/v1/in_memory_channel_types.go | 4 +
.../messaging/v1/zz_generated.deepcopy.go | 1 +
15 files changed, 284 insertions(+), 4 deletions(-)
create mode 100644 pkg/apis/duck/v1/eventpoliciesstatus_type.go
diff --git a/config/channels/in-memory-channel/resources/in-memory-channel.yaml b/config/channels/in-memory-channel/resources/in-memory-channel.yaml
index be64bb4cb3a..be5a469d060 100644
--- a/config/channels/in-memory-channel/resources/in-memory-channel.yaml
+++ b/config/channels/in-memory-channel/resources/in-memory-channel.yaml
@@ -209,6 +209,18 @@ spec:
description: Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.
type: object
x-kubernetes-preserve-unknown-fields: true
+ policies:
+ description: List of applied EventPolicies
+ type: array
+ items:
+ type: object
+ properties:
+ apiVersion:
+ description: The API version of the applied EventPolicy. This indicates which version of EventPolicy is supported by the resource.
+ type: string
+ name:
+ description: The name of the applied EventPolicy
+ type: string
conditions:
description: Conditions the latest available observations of a resource's current state.
type: array
diff --git a/config/core/resources/broker.yaml b/config/core/resources/broker.yaml
index 365692f9727..c8e3eea5f39 100644
--- a/config/core/resources/broker.yaml
+++ b/config/core/resources/broker.yaml
@@ -132,6 +132,18 @@ spec:
description: Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.
type: object
x-kubernetes-preserve-unknown-fields: true
+ policies:
+ description: List of applied EventPolicies
+ type: array
+ items:
+ type: object
+ properties:
+ apiVersion:
+ description: The API version of the applied EventPolicy. This indicates which version of EventPolicy is supported by the resource.
+ type: string
+ name:
+ description: The name of the applied EventPolicy
+ type: string
conditions:
description: Conditions the latest available observations of a resource's current state.
type: array
diff --git a/config/core/resources/parallel.yaml b/config/core/resources/parallel.yaml
index 269eb193022..492e3f119f4 100644
--- a/config/core/resources/parallel.yaml
+++ b/config/core/resources/parallel.yaml
@@ -589,6 +589,18 @@ spec:
description: 'UID of the referent. More
info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
+ policies:
+ description: List of applied EventPolicies
+ type: array
+ items:
+ type: object
+ properties:
+ apiVersion:
+ description: The API version of the applied EventPolicy. This indicates which version of EventPolicy is supported by the resource.
+ type: string
+ name:
+ description: The name of the applied EventPolicy
+ type: string
conditions:
description: Conditions the latest available observations of a resource's
current state.
diff --git a/config/core/resources/sequence.yaml b/config/core/resources/sequence.yaml
index af6157e4eff..f0021d89c88 100644
--- a/config/core/resources/sequence.yaml
+++ b/config/core/resources/sequence.yaml
@@ -257,6 +257,18 @@ spec:
type:
description: Type of condition.
type: string
+ policies:
+ description: List of applied EventPolicies
+ type: array
+ items:
+ type: object
+ properties:
+ apiVersion:
+ description: The API version of the applied EventPolicy. This indicates which version of EventPolicy is supported by the resource.
+ type: string
+ name:
+ description: The name of the applied EventPolicy
+ type: string
conditions:
description: Conditions the latest available observations of a resource's current state.
type: array
diff --git a/docs/eventing-api.md b/docs/eventing-api.md
index 091344ae66c..c11bcd806ea 100644
--- a/docs/eventing-api.md
+++ b/docs/eventing-api.md
@@ -46,6 +46,80 @@
Resource Types:
+AppliedEventPoliciesStatus
+
+
+(Appears on:BrokerStatus, ParallelStatus, SequenceStatus, InMemoryChannelStatus)
+
+
+
AppliedEventPoliciesStatus contains the list of policies which apply to a resource.
+This type is intended to be embedded into a status struct.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+policies
+
+
+[]AppliedEventPolicyRef
+
+
+ |
+
+(Optional)
+ Policies holds the list of applied EventPolicies
+ |
+
+
+
+AppliedEventPolicyRef
+
+
+(Appears on:AppliedEventPoliciesStatus)
+
+
+
AppliedEventPolicyRef is the reference to an EventPolicy
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+
+string
+
+ |
+
+ APIVersion of the applied EventPolicy.
+This indicates which version of EventPolicy is supported by the resource.
+ |
+
+
+
+name
+
+string
+
+ |
+
+ Name of the applied EventPolicy
+ |
+
+
+
BackoffPolicyType
(string
alias)
@@ -2102,6 +2176,23 @@ DeliveryStatus
resolved delivery options.
+
+
+AppliedEventPoliciesStatus
+
+
+AppliedEventPoliciesStatus
+
+
+ |
+
+
+(Members of AppliedEventPoliciesStatus are embedded into this type.)
+
+(Optional)
+AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this Broker
+ |
+
SubscriptionsAPIFilter
@@ -4313,6 +4404,23 @@ knative.dev/pkg/apis/duck/v1.AuthStatus
Auth provides the relevant information for OIDC authentication.
+
+
+AppliedEventPoliciesStatus
+
+
+AppliedEventPoliciesStatus
+
+
+ |
+
+
+(Members of AppliedEventPoliciesStatus are embedded into this type.)
+
+(Optional)
+AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this Parallel
+ |
+
ParallelSubscriptionStatus
@@ -4661,6 +4769,23 @@ knative.dev/pkg/apis/duck/v1.AuthStatus
Auth provides the relevant information for OIDC authentication.
+
+
+AppliedEventPoliciesStatus
+
+
+AppliedEventPoliciesStatus
+
+
+ |
+
+
+(Members of AppliedEventPoliciesStatus are embedded into this type.)
+
+(Optional)
+AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this Sequence
+ |
+
SequenceStep
@@ -5334,6 +5459,23 @@ ChannelableStatus
Channel conforms to Duck type ChannelableStatus.
+
+
+AppliedEventPoliciesStatus
+
+
+AppliedEventPoliciesStatus
+
+
+ |
+
+
+(Members of AppliedEventPoliciesStatus are embedded into this type.)
+
+(Optional)
+AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this InMemoryChannel
+ |
+
SubscriptionSpec
diff --git a/pkg/apis/duck/v1/eventpoliciesstatus_type.go b/pkg/apis/duck/v1/eventpoliciesstatus_type.go
new file mode 100644
index 00000000000..d895e075eb7
--- /dev/null
+++ b/pkg/apis/duck/v1/eventpoliciesstatus_type.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// AppliedEventPoliciesStatus contains the list of policies which apply to a resource.
+// This type is intended to be embedded into a status struct.
+type AppliedEventPoliciesStatus struct {
+ // Policies holds the list of applied EventPolicies
+ // +optional
+ Policies []AppliedEventPolicyRef `json:"policies,omitempty"`
+}
+
+// AppliedEventPolicyRef is the reference to an EventPolicy
+type AppliedEventPolicyRef struct {
+ // APIVersion of the applied EventPolicy.
+ // This indicates which version of EventPolicy is supported by the resource.
+ APIVersion string `json:"apiVersion"`
+
+ // Name of the applied EventPolicy
+ Name string `json:"name"`
+}
diff --git a/pkg/apis/duck/v1/zz_generated.deepcopy.go b/pkg/apis/duck/v1/zz_generated.deepcopy.go
index 0dcd45cf788..609c7f6f6e0 100644
--- a/pkg/apis/duck/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/duck/v1/zz_generated.deepcopy.go
@@ -27,6 +27,43 @@ import (
duckv1 "knative.dev/pkg/apis/duck/v1"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AppliedEventPoliciesStatus) DeepCopyInto(out *AppliedEventPoliciesStatus) {
+ *out = *in
+ if in.Policies != nil {
+ in, out := &in.Policies, &out.Policies
+ *out = make([]AppliedEventPolicyRef, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedEventPoliciesStatus.
+func (in *AppliedEventPoliciesStatus) DeepCopy() *AppliedEventPoliciesStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AppliedEventPoliciesStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AppliedEventPolicyRef) DeepCopyInto(out *AppliedEventPolicyRef) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedEventPolicyRef.
+func (in *AppliedEventPolicyRef) DeepCopy() *AppliedEventPolicyRef {
+ if in == nil {
+ return nil
+ }
+ out := new(AppliedEventPolicyRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Channelable) DeepCopyInto(out *Channelable) {
*out = *in
diff --git a/pkg/apis/eventing/v1/broker_types.go b/pkg/apis/eventing/v1/broker_types.go
index e86ffad7cc7..f536a879b2f 100644
--- a/pkg/apis/eventing/v1/broker_types.go
+++ b/pkg/apis/eventing/v1/broker_types.go
@@ -96,6 +96,10 @@ type BrokerStatus struct {
// DeliveryStatus contains a resolved URL to the dead letter sink address, and any other
// resolved delivery options.
eventingduckv1.DeliveryStatus `json:",inline"`
+
+ // AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this Broker
+ // +optional
+ eventingduckv1.AppliedEventPoliciesStatus `json:",inline"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/pkg/apis/eventing/v1/zz_generated.deepcopy.go b/pkg/apis/eventing/v1/zz_generated.deepcopy.go
index 41765815e95..b873f4032a0 100644
--- a/pkg/apis/eventing/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/eventing/v1/zz_generated.deepcopy.go
@@ -121,6 +121,7 @@ func (in *BrokerStatus) DeepCopyInto(out *BrokerStatus) {
in.Status.DeepCopyInto(&out.Status)
in.AddressStatus.DeepCopyInto(&out.AddressStatus)
in.DeliveryStatus.DeepCopyInto(&out.DeliveryStatus)
+ in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus)
return
}
diff --git a/pkg/apis/flows/v1/parallel_types.go b/pkg/apis/flows/v1/parallel_types.go
index cf9d6d6eae0..2740a72f010 100644
--- a/pkg/apis/flows/v1/parallel_types.go
+++ b/pkg/apis/flows/v1/parallel_types.go
@@ -20,7 +20,6 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-
eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
"knative.dev/pkg/apis"
@@ -123,6 +122,10 @@ type ParallelStatus struct {
// Auth provides the relevant information for OIDC authentication.
// +optional
Auth *duckv1.AuthStatus `json:"auth,omitempty"`
+
+ // AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this Parallel
+ // +optional
+ eventingduckv1.AppliedEventPoliciesStatus `json:",inline"`
}
// ParallelBranchStatus represents the current state of a Parallel branch
diff --git a/pkg/apis/flows/v1/sequence_types.go b/pkg/apis/flows/v1/sequence_types.go
index e02af40b7e0..88461b092c8 100644
--- a/pkg/apis/flows/v1/sequence_types.go
+++ b/pkg/apis/flows/v1/sequence_types.go
@@ -20,7 +20,6 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/kmeta"
@@ -135,6 +134,10 @@ type SequenceStatus struct {
// Auth provides the relevant information for OIDC authentication.
// +optional
Auth *duckv1.AuthStatus `json:"auth,omitempty"`
+
+ // AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this Sequence
+ // +optional
+ eventingduckv1.AppliedEventPoliciesStatus `json:",inline"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/pkg/apis/flows/v1/zz_generated.deepcopy.go b/pkg/apis/flows/v1/zz_generated.deepcopy.go
index 64a7474117b..03008035d86 100644
--- a/pkg/apis/flows/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/flows/v1/zz_generated.deepcopy.go
@@ -209,6 +209,7 @@ func (in *ParallelStatus) DeepCopyInto(out *ParallelStatus) {
*out = new(duckv1.AuthStatus)
(*in).DeepCopyInto(*out)
}
+ in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus)
return
}
@@ -376,6 +377,7 @@ func (in *SequenceStatus) DeepCopyInto(out *SequenceStatus) {
*out = new(duckv1.AuthStatus)
(*in).DeepCopyInto(*out)
}
+ in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus)
return
}
diff --git a/pkg/apis/messaging/v1/in_memory_channel_lifecycle_test.go b/pkg/apis/messaging/v1/in_memory_channel_lifecycle_test.go
index 34bb45a96ff..7c97a86ed21 100644
--- a/pkg/apis/messaging/v1/in_memory_channel_lifecycle_test.go
+++ b/pkg/apis/messaging/v1/in_memory_channel_lifecycle_test.go
@@ -150,7 +150,7 @@ func TestInMemoryChannelInitializeConditions(t *testing.T) {
}, {
name: "one false",
cs: &InMemoryChannelStatus{
- eventingduckv1.ChannelableStatus{
+ ChannelableStatus: eventingduckv1.ChannelableStatus{
Status: duckv1.Status{
Conditions: []apis.Condition{{
Type: InMemoryChannelConditionDispatcherReady,
@@ -160,7 +160,7 @@ func TestInMemoryChannelInitializeConditions(t *testing.T) {
},
},
want: &InMemoryChannelStatus{
- eventingduckv1.ChannelableStatus{
+ ChannelableStatus: eventingduckv1.ChannelableStatus{
Status: duckv1.Status{
Conditions: []apis.Condition{{
Type: InMemoryChannelConditionAddressable,
diff --git a/pkg/apis/messaging/v1/in_memory_channel_types.go b/pkg/apis/messaging/v1/in_memory_channel_types.go
index 5b0c971b54b..cdf0f6075ba 100644
--- a/pkg/apis/messaging/v1/in_memory_channel_types.go
+++ b/pkg/apis/messaging/v1/in_memory_channel_types.go
@@ -73,6 +73,10 @@ type InMemoryChannelSpec struct {
type InMemoryChannelStatus struct {
// Channel conforms to Duck type ChannelableStatus.
eventingduckv1.ChannelableStatus `json:",inline"`
+
+ // AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this InMemoryChannel
+ // +optional
+ eventingduckv1.AppliedEventPoliciesStatus `json:",inline"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/pkg/apis/messaging/v1/zz_generated.deepcopy.go b/pkg/apis/messaging/v1/zz_generated.deepcopy.go
index fff2d237b49..f0e08f87f5f 100644
--- a/pkg/apis/messaging/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/messaging/v1/zz_generated.deepcopy.go
@@ -245,6 +245,7 @@ func (in *InMemoryChannelSpec) DeepCopy() *InMemoryChannelSpec {
func (in *InMemoryChannelStatus) DeepCopyInto(out *InMemoryChannelStatus) {
*out = *in
in.ChannelableStatus.DeepCopyInto(&out.ChannelableStatus)
+ in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus)
return
}
From 43cf75a2635cea836672eb403b910be2babaf648 Mon Sep 17 00:00:00 2001
From: Knative Automation
Date: Mon, 17 Jun 2024 02:24:54 -0400
Subject: [PATCH 13/33] [main] Upgrade to latest dependencies (#8004)
upgrade to latest dependencies
bumping knative.dev/pkg 15e6cdf...339c22b:
> 339c22b Add AuthenticatableType duck type (# 3056)
bumping knative.dev/reconciler-test 2023469...199a526:
> 199a526 upgrade to latest dependencies (# 736)
Signed-off-by: Knative Automation
---
go.mod | 4 +-
go.sum | 8 +-
.../pkg/apis/duck/v1/auth_types.go | 93 ++++++++++++++
.../pkg/apis/duck/v1/zz_generated.deepcopy.go | 81 ++++++++++++
.../ducks/duck/v1/authstatus/authstatus.go | 60 +++++++++
.../pkg/resolver/authenticatable_resolver.go | 117 ++++++++++++++++++
vendor/modules.txt | 5 +-
7 files changed, 360 insertions(+), 8 deletions(-)
create mode 100644 vendor/knative.dev/pkg/client/injection/ducks/duck/v1/authstatus/authstatus.go
create mode 100644 vendor/knative.dev/pkg/resolver/authenticatable_resolver.go
diff --git a/go.mod b/go.mod
index 9bbc20897a6..db0a2a1826c 100644
--- a/go.mod
+++ b/go.mod
@@ -51,8 +51,8 @@ require (
k8s.io/utils v0.0.0-20240102154912-e7106e64919e
knative.dev/hack v0.0.0-20240607132042-09143140a254
knative.dev/hack/schema v0.0.0-20240607132042-09143140a254
- knative.dev/pkg v0.0.0-20240610120318-15e6cdf2f386
- knative.dev/reconciler-test v0.0.0-20240607131348-2023469d1158
+ knative.dev/pkg v0.0.0-20240614135239-339c22b8218c
+ knative.dev/reconciler-test v0.0.0-20240611155001-199a5264927d
sigs.k8s.io/yaml v1.4.0
)
diff --git a/go.sum b/go.sum
index db5c1f49c54..3bfa4c71fdc 100644
--- a/go.sum
+++ b/go.sum
@@ -896,10 +896,10 @@ knative.dev/hack v0.0.0-20240607132042-09143140a254 h1:1YFnu3U6dWZg0oxm6GU8kEdA9
knative.dev/hack v0.0.0-20240607132042-09143140a254/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/hack/schema v0.0.0-20240607132042-09143140a254 h1:b9hFHGtxx0Kpm4EEjSD72lL0jms91To3OEVBTbqfOYI=
knative.dev/hack/schema v0.0.0-20240607132042-09143140a254/go.mod h1:3pWwBLnTZSM9psSgCAvhKOHIPTzqfEMlWRpDu6IYhK0=
-knative.dev/pkg v0.0.0-20240610120318-15e6cdf2f386 h1:nxFTT6DrXr70Zi2BK8nc57ts0/smyavd/uBRBbtqg94=
-knative.dev/pkg v0.0.0-20240610120318-15e6cdf2f386/go.mod h1:l7R8/SteYph0mZDsVgq3fVs4mWp1DaYx9BJJX68U6ik=
-knative.dev/reconciler-test v0.0.0-20240607131348-2023469d1158 h1:5yXdPOh4kh20K0/4p2KKWQd2taPZyo4XGH4wojwFaQg=
-knative.dev/reconciler-test v0.0.0-20240607131348-2023469d1158/go.mod h1:vn4ts7F9M2LrU07Sz0OQW3Ci92rv/JQ/4BW3crU7Xb4=
+knative.dev/pkg v0.0.0-20240614135239-339c22b8218c h1:OaKrY7L6rzWTvs51JlieJajL40F6CpBbvO1aZspg2EA=
+knative.dev/pkg v0.0.0-20240614135239-339c22b8218c/go.mod h1:l7R8/SteYph0mZDsVgq3fVs4mWp1DaYx9BJJX68U6ik=
+knative.dev/reconciler-test v0.0.0-20240611155001-199a5264927d h1:FBpgtMooLXWfl8QjGNVEosw9QGPhJzkPip+x5jBVrT8=
+knative.dev/reconciler-test v0.0.0-20240611155001-199a5264927d/go.mod h1:iKOTdGVwm+SmVA/blgirYTdYU/Kw3Znj2arDYLlhoXw=
pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw=
pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/vendor/knative.dev/pkg/apis/duck/v1/auth_types.go b/vendor/knative.dev/pkg/apis/duck/v1/auth_types.go
index 5d76a7b422c..dfb81cbe621 100644
--- a/vendor/knative.dev/pkg/apis/duck/v1/auth_types.go
+++ b/vendor/knative.dev/pkg/apis/duck/v1/auth_types.go
@@ -16,6 +16,21 @@ limitations under the License.
package v1
+import (
+ "context"
+ "fmt"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/apis/duck/ducktypes"
+ "knative.dev/pkg/kmeta"
+ "knative.dev/pkg/ptr"
+)
+
+// +genduck
+
// AuthStatus is meant to provide the generated service account name
// in the resource status.
type AuthStatus struct {
@@ -28,3 +43,81 @@ type AuthStatus struct {
// when the component uses multiple identities (e.g. in case of a Parallel).
ServiceAccountNames []string `json:"serviceAccountNames,omitempty"`
}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AuthenticatableType is a skeleton type wrapping AuthStatus in the manner we expect
+// resource writers defining compatible resources to embed it. We will
+// typically use this type to deserialize AuthenticatableType ObjectReferences and
+// access the AuthenticatableType data. This is not a real resource.
+type AuthenticatableType struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Status AuthenticatableStatus `json:"status"`
+}
+
+type AuthenticatableStatus struct {
+ // Auth contains the service account name for the subscription
+ // +optional
+ Auth *AuthStatus `json:"auth,omitempty"`
+}
+
+var (
+ // AuthStatus is a Convertible type.
+ _ apis.Convertible = (*AuthStatus)(nil)
+
+ // Verify AuthenticatableType resources meet duck contracts.
+ _ apis.Listable = (*AuthenticatableType)(nil)
+ _ ducktypes.Populatable = (*AuthenticatableType)(nil)
+ _ kmeta.OwnerRefable = (*AuthenticatableType)(nil)
+)
+
+// GetFullType implements duck.Implementable
+func (*AuthStatus) GetFullType() ducktypes.Populatable {
+ return &AuthenticatableType{}
+}
+
+// ConvertTo implements apis.Convertible
+func (a *AuthStatus) ConvertTo(_ context.Context, to apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", to)
+}
+
+// ConvertFrom implements apis.Convertible
+func (a *AuthStatus) ConvertFrom(_ context.Context, from apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", from)
+}
+
+// Populate implements duck.Populatable
+func (t *AuthenticatableType) Populate() {
+ t.Status = AuthenticatableStatus{
+ Auth: &AuthStatus{
+ // Populate ALL fields
+ ServiceAccountName: ptr.String("foo"),
+ ServiceAccountNames: []string{
+ "bar",
+ "baz",
+ },
+ },
+ }
+}
+
+// GetGroupVersionKind implements kmeta.OwnerRefable
+func (t *AuthenticatableType) GetGroupVersionKind() schema.GroupVersionKind {
+ return t.GroupVersionKind()
+}
+
+// GetListType implements apis.Listable
+func (*AuthenticatableType) GetListType() runtime.Object {
+ return &AuthenticatableTypeList{}
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AuthenticatableTypeList is a list of AuthenticatableType resources
+type AuthenticatableTypeList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []AuthenticatableType `json:"items"`
+}
diff --git a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go
index 9dab1a912c9..bc263edfd67 100644
--- a/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go
+++ b/vendor/knative.dev/pkg/apis/duck/v1/zz_generated.deepcopy.go
@@ -176,6 +176,87 @@ func (in *AuthStatus) DeepCopy() *AuthStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticatableStatus) DeepCopyInto(out *AuthenticatableStatus) {
+ *out = *in
+ if in.Auth != nil {
+ in, out := &in.Auth, &out.Auth
+ *out = new(AuthStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticatableStatus.
+func (in *AuthenticatableStatus) DeepCopy() *AuthenticatableStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticatableStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticatableType) DeepCopyInto(out *AuthenticatableType) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticatableType.
+func (in *AuthenticatableType) DeepCopy() *AuthenticatableType {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticatableType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthenticatableType) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticatableTypeList) DeepCopyInto(out *AuthenticatableTypeList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]AuthenticatableType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticatableTypeList.
+func (in *AuthenticatableTypeList) DeepCopy() *AuthenticatableTypeList {
+ if in == nil {
+ return nil
+ }
+ out := new(AuthenticatableTypeList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthenticatableTypeList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Binding) DeepCopyInto(out *Binding) {
*out = *in
diff --git a/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/authstatus/authstatus.go b/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/authstatus/authstatus.go
new file mode 100644
index 00000000000..8f2a26501cc
--- /dev/null
+++ b/vendor/knative.dev/pkg/client/injection/ducks/duck/v1/authstatus/authstatus.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2022 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package authstatus
+
+import (
+ context "context"
+
+ duck "knative.dev/pkg/apis/duck"
+ v1 "knative.dev/pkg/apis/duck/v1"
+ controller "knative.dev/pkg/controller"
+ injection "knative.dev/pkg/injection"
+ dynamicclient "knative.dev/pkg/injection/clients/dynamicclient"
+ logging "knative.dev/pkg/logging"
+)
+
+func init() {
+ injection.Default.RegisterDuck(WithDuck)
+}
+
+// Key is used for associating the Informer inside the context.Context.
+type Key struct{}
+
+func WithDuck(ctx context.Context) context.Context {
+ dc := dynamicclient.Get(ctx)
+ dif := &duck.CachedInformerFactory{
+ Delegate: &duck.TypedInformerFactory{
+ Client: dc,
+ Type: (&v1.AuthStatus{}).GetFullType(),
+ ResyncPeriod: controller.GetResyncPeriod(ctx),
+ StopChannel: ctx.Done(),
+ },
+ }
+ return context.WithValue(ctx, Key{}, dif)
+}
+
+// Get extracts the typed informer from the context.
+func Get(ctx context.Context) duck.InformerFactory {
+ untyped := ctx.Value(Key{})
+ if untyped == nil {
+ logging.FromContext(ctx).Panic(
+ "Unable to fetch knative.dev/pkg/apis/duck.InformerFactory from context.")
+ }
+ return untyped.(duck.InformerFactory)
+}
diff --git a/vendor/knative.dev/pkg/resolver/authenticatable_resolver.go b/vendor/knative.dev/pkg/resolver/authenticatable_resolver.go
new file mode 100644
index 00000000000..78dd69bddb4
--- /dev/null
+++ b/vendor/knative.dev/pkg/resolver/authenticatable_resolver.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resolver
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/tools/cache"
+
+ corev1 "k8s.io/api/core/v1"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "knative.dev/pkg/client/injection/ducks/duck/v1/authstatus"
+ "knative.dev/pkg/controller"
+
+ pkgapisduck "knative.dev/pkg/apis/duck"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/tracker"
+)
+
+// AuthenticatableResolver resolves ObjectReferences into a AuthenticatableType.
+type AuthenticatableResolver struct {
+ tracker tracker.Interface
+ listerFactory func(schema.GroupVersionResource) (cache.GenericLister, error)
+}
+
+// NewAuthenticatableResolverFromTracker constructs a new AuthenticatableResolver with context and a tracker.
+func NewAuthenticatableResolverFromTracker(ctx context.Context, t tracker.Interface) *AuthenticatableResolver {
+ ret := &AuthenticatableResolver{
+ tracker: t,
+ }
+
+ informerFactory := &pkgapisduck.CachedInformerFactory{
+ Delegate: &pkgapisduck.EnqueueInformerFactory{
+ Delegate: authstatus.Get(ctx),
+ EventHandler: controller.HandleAll(ret.tracker.OnChanged),
+ },
+ }
+
+ ret.listerFactory = func(gvr schema.GroupVersionResource) (cache.GenericLister, error) {
+ _, l, err := informerFactory.Get(ctx, gvr)
+ return l, err
+ }
+
+ return ret
+}
+
+// AuthStatusFromObjectReference returns the AuthStatus from an object
+func (r *AuthenticatableResolver) AuthStatusFromObjectReference(ref *corev1.ObjectReference, parent interface{}) (*duckv1.AuthStatus, error) {
+ if ref == nil {
+ return nil, apierrs.NewBadRequest("ref is nil")
+ }
+
+ authenticatable, err := r.authenticatableFromObjectReference(ref, parent)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get authenticatable %s/%s: %w", ref.Namespace, ref.Name, err)
+ }
+
+ if authenticatable.Status.Auth == nil {
+ return nil, fmt.Errorf(".status.auth is missing in object %s/%s", ref.Namespace, ref.Name)
+ }
+
+ return authenticatable.Status.Auth, nil
+}
+
+// authenticatableFromObjectReference resolves an object reference into an AuthenticatableType
+func (r *AuthenticatableResolver) authenticatableFromObjectReference(ref *corev1.ObjectReference, parent interface{}) (*duckv1.AuthenticatableType, error) {
+ if ref == nil {
+ return nil, apierrs.NewBadRequest("ref is nil")
+ }
+
+ gvr, _ := meta.UnsafeGuessKindToResource(ref.GroupVersionKind())
+ if err := r.tracker.TrackReference(tracker.Reference{
+ APIVersion: ref.APIVersion,
+ Kind: ref.Kind,
+ Namespace: ref.Namespace,
+ Name: ref.Name,
+ }, parent); err != nil {
+ return nil, fmt.Errorf("failed to track reference %s %s/%s: %w", gvr.String(), ref.Namespace, ref.Name, err)
+ }
+
+ lister, err := r.listerFactory(gvr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get lister for %s: %w", gvr.String(), err)
+ }
+
+ obj, err := lister.ByNamespace(ref.Namespace).Get(ref.Name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get object %s/%s: %w", ref.Namespace, ref.Name, err)
+ }
+
+ authenticatable, ok := obj.(*duckv1.AuthenticatableType)
+ if !ok {
+ return nil, apierrs.NewBadRequest(fmt.Sprintf("%s(%T) is not an AuthenticatableType", ref, ref))
+ }
+
+ // Do not modify informer copy.
+ authenticatable = authenticatable.DeepCopy()
+
+ return authenticatable, nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 82f181cd65c..b43bc630a62 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1252,7 +1252,7 @@ knative.dev/hack/schema/commands
knative.dev/hack/schema/docs
knative.dev/hack/schema/registry
knative.dev/hack/schema/schema
-# knative.dev/pkg v0.0.0-20240610120318-15e6cdf2f386
+# knative.dev/pkg v0.0.0-20240614135239-339c22b8218c
## explicit; go 1.21
knative.dev/pkg/apiextensions/storageversion
knative.dev/pkg/apiextensions/storageversion/cmd/migrate
@@ -1274,6 +1274,7 @@ knative.dev/pkg/client/injection/apiextensions/informers/factory/fake
knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition
knative.dev/pkg/client/injection/ducks/duck/v1/addressable
knative.dev/pkg/client/injection/ducks/duck/v1/addressable/fake
+knative.dev/pkg/client/injection/ducks/duck/v1/authstatus
knative.dev/pkg/client/injection/ducks/duck/v1/conditions
knative.dev/pkg/client/injection/ducks/duck/v1/conditions/fake
knative.dev/pkg/client/injection/ducks/duck/v1/kresource
@@ -1405,7 +1406,7 @@ knative.dev/pkg/webhook/resourcesemantics
knative.dev/pkg/webhook/resourcesemantics/conversion
knative.dev/pkg/webhook/resourcesemantics/defaulting
knative.dev/pkg/webhook/resourcesemantics/validation
-# knative.dev/reconciler-test v0.0.0-20240607131348-2023469d1158
+# knative.dev/reconciler-test v0.0.0-20240611155001-199a5264927d
## explicit; go 1.21
knative.dev/reconciler-test/cmd/eventshub
knative.dev/reconciler-test/pkg/environment
From 0eee3018d70cbb67fc5c56c75ac755660888361e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20St=C3=A4bler?=
Date: Mon, 17 Jun 2024 11:15:45 +0200
Subject: [PATCH 14/33] Propagate read error correctly in event-dispatcher
(#8005)
---
pkg/kncloudevents/event_dispatcher.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/pkg/kncloudevents/event_dispatcher.go b/pkg/kncloudevents/event_dispatcher.go
index f717ddafd8d..8a0176238a0 100644
--- a/pkg/kncloudevents/event_dispatcher.go
+++ b/pkg/kncloudevents/event_dispatcher.go
@@ -352,6 +352,7 @@ func (d *Dispatcher) executeRequest(ctx context.Context, target duckv1.Addressab
var responseMessageBody []byte
if err != nil && err != io.EOF {
responseMessageBody = []byte(fmt.Sprintf("Failed to read response body: %s", err.Error()))
+ dispatchInfo.ResponseCode = http.StatusInternalServerError
} else {
responseMessageBody = body.Bytes()
dispatchInfo.ResponseBody = responseMessageBody
From 2157639064d6e1d275d26ee8580b899413a500cd Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Mon, 17 Jun 2024 12:53:24 +0200
Subject: [PATCH 15/33] Add validation for EventPolicy sub suffix matching
(#8008)
Signed-off-by: Pierangelo Di Pilato
---
.../v1alpha1/eventpolicy_validation.go | 16 ++++++
.../v1alpha1/eventpolicy_validation_test.go | 52 +++++++++++++++++++
2 files changed, 68 insertions(+)
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go b/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go
index 6c4eafb5caa..0c267b31968 100644
--- a/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go
@@ -18,6 +18,7 @@ package v1alpha1
import (
"context"
+ "strings"
"knative.dev/pkg/apis"
)
@@ -36,6 +37,7 @@ func (ets *EventPolicySpec) Validate(ctx context.Context) *apis.FieldError {
err = err.Also(apis.ErrMultipleOneOf("ref", "sub").ViaFieldIndex("from", i))
}
err = err.Also(f.Ref.Validate().ViaField("ref").ViaFieldIndex("from", i))
+ err = err.Also(validateSub(f.Sub).ViaField("sub").ViaFieldIndex("from", i))
}
for i, t := range ets.To {
@@ -53,6 +55,20 @@ func (ets *EventPolicySpec) Validate(ctx context.Context) *apis.FieldError {
return err
}
+func validateSub(sub *string) *apis.FieldError {
+ if sub == nil || len(*sub) <= 1 {
+ return nil
+ }
+
+ lastInvalidIdx := len(*sub) - 2
+ firstInvalidIdx := 0
+ if idx := strings.IndexRune(*sub, '*'); idx >= firstInvalidIdx && idx <= lastInvalidIdx {
+ return apis.ErrInvalidValue(*sub, "", "'*' is only allowed as suffix")
+ }
+
+ return nil
+}
+
func (r *EventPolicyFromReference) Validate() *apis.FieldError {
if r == nil {
return nil
diff --git a/pkg/apis/eventing/v1alpha1/eventpolicy_validation_test.go b/pkg/apis/eventing/v1alpha1/eventpolicy_validation_test.go
index c4b388b0291..da103fd069f 100644
--- a/pkg/apis/eventing/v1alpha1/eventpolicy_validation_test.go
+++ b/pkg/apis/eventing/v1alpha1/eventpolicy_validation_test.go
@@ -196,6 +196,58 @@ func TestEventPolicySpecValidation(t *testing.T) {
return apis.ErrMissingField("apiVersion").ViaField("ref").ViaFieldIndex("to", 0).ViaField("spec")
}(),
},
+ {
+ name: "invalid, from.sub '*' set as infix",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ From: []EventPolicySpecFrom{{
+ Sub: ptr.String("a*c"),
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrInvalidValue("a*c", "sub", "'*' is only allowed as suffix").ViaFieldIndex("from", 0).ViaField("spec")
+ }(),
+ },
+ {
+ name: "invalid, from.sub '*' set as prefix",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ From: []EventPolicySpecFrom{{
+ Sub: ptr.String("*a"),
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return apis.ErrInvalidValue("*a", "sub", "'*' is only allowed as suffix").ViaFieldIndex("from", 0).ViaField("spec")
+ }(),
+ },
+ {
+ name: "valid, from.sub '*' set as suffix",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ From: []EventPolicySpecFrom{{
+ Sub: ptr.String("a*"),
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return nil
+ }(),
+ },
+ {
+ name: "valid, from.sub exactly '*'",
+ ep: &EventPolicy{
+ Spec: EventPolicySpec{
+ From: []EventPolicySpecFrom{{
+ Sub: ptr.String("*"),
+ }},
+ },
+ },
+ want: func() *apis.FieldError {
+ return nil
+ }(),
+ },
}
for _, test := range tests {
From 1a21fee774976a12a072f6bbffbc49cd59c29e02 Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Mon, 17 Jun 2024 14:18:54 +0200
Subject: [PATCH 16/33] Add all JobSink symlinks in config/ (#8007)
Signed-off-by: Pierangelo Di Pilato
---
config/200-job-sink-clusterrole.yaml | 1 +
config/200-job-sink-serviceaccount.yaml | 1 +
config/300-jobsink.yaml | 1 +
3 files changed, 3 insertions(+)
create mode 120000 config/200-job-sink-clusterrole.yaml
create mode 120000 config/200-job-sink-serviceaccount.yaml
create mode 120000 config/300-jobsink.yaml
diff --git a/config/200-job-sink-clusterrole.yaml b/config/200-job-sink-clusterrole.yaml
new file mode 120000
index 00000000000..cfbddb9b567
--- /dev/null
+++ b/config/200-job-sink-clusterrole.yaml
@@ -0,0 +1 @@
+core/roles/job-sink-clusterrole.yaml
\ No newline at end of file
diff --git a/config/200-job-sink-serviceaccount.yaml b/config/200-job-sink-serviceaccount.yaml
new file mode 120000
index 00000000000..5538c5748dc
--- /dev/null
+++ b/config/200-job-sink-serviceaccount.yaml
@@ -0,0 +1 @@
+core/200-job-sink-serviceaccount.yaml
\ No newline at end of file
diff --git a/config/300-jobsink.yaml b/config/300-jobsink.yaml
new file mode 120000
index 00000000000..ff15b026fe3
--- /dev/null
+++ b/config/300-jobsink.yaml
@@ -0,0 +1 @@
+core/resources/jobsink.yaml
\ No newline at end of file
From e298f32440e45c31a46a4abae50c62a5c8dc2e98 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20St=C3=A4bler?=
Date: Mon, 17 Jun 2024 15:17:15 +0200
Subject: [PATCH 17/33] Add authz library (#8002)
* Add function to get EventPolicies of resource
* Switch from kmeta.Accessor to GVK and ObjectMeta as reconcilers don't provision TypeMeta field on resources
* Add function comment
* WIP Add ResolveSubjects function
* Switch to authenticatable resolver
* Add more unit tests for ResolveSubjects
* Switch to strings.EqualFold for ref comparing
* Add SubjectContained method to check if subject is contained in list of allowed subjects
* Add missing boilerplate
* Run goimports and gofmt
* Use label selectors
* Move check for empty .spec.to at top
* Remove unneeded code
* Ignore references APIVersion version part, when listing EPs for a resource
* Switch to native GroupVersion parsing functions
* Remove unneeded strings.ToLower()
---
pkg/auth/event_policy.go | 147 +++++++
pkg/auth/event_policy_test.go | 766 ++++++++++++++++++++++++++++++++++
2 files changed, 913 insertions(+)
create mode 100644 pkg/auth/event_policy.go
create mode 100644 pkg/auth/event_policy_test.go
diff --git a/pkg/auth/event_policy.go b/pkg/auth/event_policy.go
new file mode 100644
index 00000000000..26efd163409
--- /dev/null
+++ b/pkg/auth/event_policy.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package auth
+
+import (
+ "fmt"
+ "strings"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ listerseventingv1alpha1 "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
+ "knative.dev/pkg/resolver"
+)
+
+// GetEventPoliciesForResource returns the applying EventPolicies for a given resource
+func GetEventPoliciesForResource(lister listerseventingv1alpha1.EventPolicyLister, resourceGVK schema.GroupVersionKind, resourceObjectMeta metav1.ObjectMeta) ([]*v1alpha1.EventPolicy, error) {
+ policies, err := lister.EventPolicies(resourceObjectMeta.GetNamespace()).List(labels.Everything())
+ if err != nil {
+ return nil, fmt.Errorf("failed to list eventpolicies: %w", err)
+ }
+
+ relevantPolicies := []*v1alpha1.EventPolicy{}
+
+ for _, policy := range policies {
+ if len(policy.Spec.To) == 0 {
+ // policy applies to all resources in namespace
+ relevantPolicies = append(relevantPolicies, policy)
+ }
+
+ for _, to := range policy.Spec.To {
+ if to.Ref != nil {
+ refGV, err := schema.ParseGroupVersion(to.Ref.APIVersion)
+ if err != nil {
+ return nil, fmt.Errorf("cannot split apiVersion into group and version: %s", to.Ref.APIVersion)
+ }
+
+ if strings.EqualFold(to.Ref.Name, resourceObjectMeta.GetName()) &&
+ strings.EqualFold(refGV.Group, resourceGVK.Group) &&
+ strings.EqualFold(to.Ref.Kind, resourceGVK.Kind) {
+
+ relevantPolicies = append(relevantPolicies, policy)
+ break // no need to check the other .spec.to's from this policy
+ }
+ }
+
+ if to.Selector != nil {
+ selectorGV, err := schema.ParseGroupVersion(to.Selector.APIVersion)
+ if err != nil {
+ return nil, fmt.Errorf("cannot split apiVersion into group and version: %s", to.Selector.APIVersion)
+ }
+
+ if strings.EqualFold(selectorGV.Group, resourceGVK.Group) &&
+ strings.EqualFold(to.Selector.Kind, resourceGVK.Kind) {
+
+ selector, err := metav1.LabelSelectorAsSelector(to.Selector.LabelSelector)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse selector: %w", err)
+ }
+
+ if selector.Matches(labels.Set(resourceObjectMeta.Labels)) {
+ relevantPolicies = append(relevantPolicies, policy)
+ break // no need to check the other .spec.to's from this policy
+ }
+ }
+ }
+ }
+ }
+
+ return relevantPolicies, nil
+}
+
+// ResolveSubjects returns the OIDC service accounts names for the objects referenced in the EventPolicySpecFrom.
+func ResolveSubjects(resolver *resolver.AuthenticatableResolver, eventPolicy *v1alpha1.EventPolicy) ([]string, error) {
+ allSAs := []string{}
+ for _, from := range eventPolicy.Spec.From {
+ if from.Ref != nil {
+ sas, err := resolveSubjectsFromReference(resolver, *from.Ref, eventPolicy)
+ if err != nil {
+ return nil, fmt.Errorf("could not resolve subjects from reference: %w", err)
+ }
+ allSAs = append(allSAs, sas...)
+ } else if from.Sub != nil {
+ allSAs = append(allSAs, *from.Sub)
+ }
+ }
+
+ return allSAs, nil
+}
+
+func resolveSubjectsFromReference(resolver *resolver.AuthenticatableResolver, reference v1alpha1.EventPolicyFromReference, trackingEventPolicy *v1alpha1.EventPolicy) ([]string, error) {
+ authStatus, err := resolver.AuthStatusFromObjectReference(&corev1.ObjectReference{
+ APIVersion: reference.APIVersion,
+ Kind: reference.Kind,
+ Namespace: reference.Namespace,
+ Name: reference.Name,
+ }, trackingEventPolicy)
+
+ if err != nil {
+ return nil, fmt.Errorf("could not resolve auth status: %w", err)
+ }
+
+ objSAs := authStatus.ServiceAccountNames
+ if authStatus.ServiceAccountName != nil {
+ objSAs = append(objSAs, *authStatus.ServiceAccountName)
+ }
+
+ objFullSANames := make([]string, 0, len(objSAs))
+ for _, sa := range objSAs {
+ objFullSANames = append(objFullSANames, fmt.Sprintf("system:serviceaccount:%s:%s", reference.Namespace, sa))
+ }
+
+ return objFullSANames, nil
+}
+
+// SubjectContained checks if the given sub is contained in the list of allowedSubs
+// or if it matches a prefix pattern in subs (e.g. system:serviceaccounts:my-ns:*)
+func SubjectContained(sub string, allowedSubs []string) bool {
+ for _, s := range allowedSubs {
+ if strings.EqualFold(s, sub) {
+ return true
+ }
+
+ if strings.HasSuffix(s, "*") &&
+ strings.HasPrefix(sub, strings.TrimSuffix(s, "*")) {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/pkg/auth/event_policy_test.go b/pkg/auth/event_policy_test.go
new file mode 100644
index 00000000000..64f972c7ba6
--- /dev/null
+++ b/pkg/auth/event_policy_test.go
@@ -0,0 +1,766 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package auth
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/strings/slices"
+ eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+ eventpolicyinformerfake "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/fake"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/client/injection/ducks/duck/v1/authstatus"
+ fakedynamicclient "knative.dev/pkg/injection/clients/dynamicclient/fake"
+ "knative.dev/pkg/ptr"
+ reconcilertesting "knative.dev/pkg/reconciler/testing"
+ "knative.dev/pkg/resolver"
+ "knative.dev/pkg/tracker"
+)
+
+func TestGetEventPoliciesForResource(t *testing.T) {
+
+ tests := []struct {
+ name string
+ resourceObjectMeta metav1.ObjectMeta
+ existingPolicies []v1alpha1.EventPolicy
+ want []string
+ wantErr bool
+ }{
+ {
+ name: "No match",
+ resourceObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-namespace",
+ },
+ existingPolicies: []v1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy-1",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ Name: "another-broker",
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: []string{},
+ }, {
+ name: "No match (different namespace)",
+ resourceObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-namespace",
+ },
+ existingPolicies: []v1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy-1",
+ Namespace: "another-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ Name: "my-broker",
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: []string{},
+ }, {
+ name: "Match all (empty .spec.to)",
+ resourceObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-namespace",
+ },
+ existingPolicies: []v1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy-1",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: nil,
+ },
+ },
+ },
+ want: []string{
+ "my-policy-1",
+ },
+ }, {
+ name: "Direct reference to resource",
+ resourceObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-namespace",
+ },
+ existingPolicies: []v1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy-1",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ Name: "my-broker",
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: []string{
+ "my-policy-1",
+ },
+ }, {
+ name: "Reference via selector to resource",
+ resourceObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-namespace",
+ Labels: map[string]string{
+ "key": "value",
+ },
+ },
+ existingPolicies: []v1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy-1",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ }, {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "another-policy",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "another-key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ want: []string{
+ "my-policy-1",
+ },
+ }, {
+ name: "Reference via selector to resource (multiple policies)",
+ resourceObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-namespace",
+ Labels: map[string]string{
+ "key": "value",
+ },
+ },
+ existingPolicies: []v1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy-1",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ }, {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "another-policy",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ Name: "my-broker",
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ }, {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "another-policy-2",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ Kind: "Another-Kind",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ }, {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "another-policy-3",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "key",
+ Operator: metav1.LabelSelectorOpExists,
+ },
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ want: []string{
+ "my-policy-1",
+ "another-policy",
+ "another-policy-3",
+ },
+ }, {
+ name: "Reference via selector to resource (multiple policies - not all matching)",
+ resourceObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-namespace",
+ Labels: map[string]string{
+ "key": "value",
+ },
+ },
+ existingPolicies: []v1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy-1",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ }, {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "another-policy",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "another-key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ }, {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "another-policy-2",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ Kind: "Another-Kind",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ want: []string{
+ "my-policy-1",
+ },
+ }, {
+ name: "Match (ignore ref.APIVersion version)",
+ resourceObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-namespace",
+ },
+ existingPolicies: []v1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy-1",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ Name: "my-broker",
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v12345",
+ },
+ },
+ },
+ },
+ },
+ },
+ want: []string{
+ "my-policy-1",
+ },
+ }, {
+ name: "Match (ignore selector.APIVersion version)",
+ resourceObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-namespace",
+ Labels: map[string]string{
+ "key": "value",
+ },
+ },
+ existingPolicies: []v1alpha1.EventPolicy{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy-1",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v12345",
+ },
+ },
+ },
+ },
+ },
+ }, {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "another-policy",
+ Namespace: "my-namespace",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "another-key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ Kind: "Broker",
+ APIVersion: "eventing.knative.dev/v1",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ want: []string{
+ "my-policy-1",
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx, _ := reconcilertesting.SetupFakeContext(t)
+
+ for i := range tt.existingPolicies {
+ err := eventpolicyinformerfake.Get(ctx).Informer().GetStore().Add(&tt.existingPolicies[i])
+ if err != nil {
+ t.Fatalf("error adding policies: %v", err)
+ }
+ }
+
+ brokerGVK := eventingv1.SchemeGroupVersion.WithKind("Broker")
+ got, err := GetEventPoliciesForResource(eventpolicyinformerfake.Get(ctx).Lister(), brokerGVK, tt.resourceObjectMeta)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("GetEventPoliciesForResource() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+
+ gotNames := make([]string, 0, len(got))
+ for _, p := range got {
+ gotNames = append(gotNames, p.Name)
+ }
+
+ if len(gotNames) != len(tt.want) {
+ t.Errorf("GetEventPoliciesForResource() len(got) = %d, want %d", len(gotNames), len(tt.want))
+ }
+
+ for _, wantName := range tt.want {
+ if !slices.Contains(gotNames, wantName) {
+ t.Errorf("GetEventPoliciesForResource() got = %q, want %q. Missing %q", strings.Join(gotNames, ","), strings.Join(tt.want, ","), wantName)
+ }
+ }
+ })
+ }
+}
+
+func TestResolveSubjects(t *testing.T) {
+ namespace := "my-ns"
+
+ tests := []struct {
+ name string
+ froms []v1alpha1.EventPolicySpecFrom
+ objects []runtime.Object
+ want []string
+ wantErr bool
+ }{
+ {
+ name: "simple",
+ froms: []v1alpha1.EventPolicySpecFrom{
+ {
+ Ref: &v1alpha1.EventPolicyFromReference{
+ APIVersion: "sources.knative.dev/v1",
+ Kind: "ApiServerSource",
+ Name: "my-source",
+ Namespace: namespace,
+ },
+ }, {
+ Sub: ptr.String("system:serviceaccount:my-ns:my-app"),
+ }, {
+ Sub: ptr.String("system:serviceaccount:my-ns:my-app-2"),
+ },
+ },
+ objects: []runtime.Object{
+ &sourcesv1.ApiServerSource{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-source",
+ Namespace: namespace,
+ },
+ Status: sourcesv1.ApiServerSourceStatus{
+ SourceStatus: duckv1.SourceStatus{
+ Auth: &duckv1.AuthStatus{
+ ServiceAccountName: ptr.String("my-apiserversource-oidc-sa"),
+ },
+ },
+ },
+ },
+ &eventingv1.Broker{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: namespace,
+ },
+ Status: eventingv1.BrokerStatus{},
+ },
+ },
+ want: []string{
+ "system:serviceaccount:my-ns:my-apiserversource-oidc-sa",
+ "system:serviceaccount:my-ns:my-app",
+ "system:serviceaccount:my-ns:my-app-2",
+ },
+ }, {
+ name: "multiple references",
+ froms: []v1alpha1.EventPolicySpecFrom{
+ {
+ Ref: &v1alpha1.EventPolicyFromReference{
+ APIVersion: "sources.knative.dev/v1",
+ Kind: "ApiServerSource",
+ Name: "my-source",
+ Namespace: namespace,
+ },
+ }, {
+ Ref: &v1alpha1.EventPolicyFromReference{
+ APIVersion: "sources.knative.dev/v1",
+ Kind: "PingSource",
+ Name: "my-pingsource",
+ Namespace: namespace,
+ },
+ }, {
+ Sub: ptr.String("system:serviceaccount:my-ns:my-app"),
+ }, {
+ Sub: ptr.String("system:serviceaccount:my-ns:my-app-2"),
+ },
+ },
+ objects: []runtime.Object{
+ &sourcesv1.ApiServerSource{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-source",
+ Namespace: namespace,
+ },
+ Status: sourcesv1.ApiServerSourceStatus{
+ SourceStatus: duckv1.SourceStatus{
+ Auth: &duckv1.AuthStatus{
+ ServiceAccountName: ptr.String("my-apiserversource-oidc-sa"),
+ },
+ },
+ },
+ },
+ &sourcesv1.PingSource{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-pingsource",
+ Namespace: namespace,
+ },
+ Status: sourcesv1.PingSourceStatus{
+ SourceStatus: duckv1.SourceStatus{
+ Auth: &duckv1.AuthStatus{
+ ServiceAccountName: ptr.String("my-pingsource-oidc-sa"),
+ },
+ },
+ },
+ },
+ &eventingv1.Broker{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: namespace,
+ },
+ Status: eventingv1.BrokerStatus{},
+ },
+ },
+ want: []string{
+ "system:serviceaccount:my-ns:my-apiserversource-oidc-sa",
+ "system:serviceaccount:my-ns:my-pingsource-oidc-sa",
+ "system:serviceaccount:my-ns:my-app",
+ "system:serviceaccount:my-ns:my-app-2",
+ },
+ }, {
+ name: "reference has not auth status",
+ froms: []v1alpha1.EventPolicySpecFrom{
+ {
+ Ref: &v1alpha1.EventPolicyFromReference{
+ APIVersion: "eventing.knative.dev/v1",
+ Kind: "Broker",
+ Name: "my-broker",
+ Namespace: namespace,
+ },
+ },
+ },
+ objects: []runtime.Object{
+ &eventingv1.Broker{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: namespace,
+ },
+ Status: eventingv1.BrokerStatus{},
+ },
+ },
+ want: nil,
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+
+ ctx, _ := fakedynamicclient.With(context.Background(), scheme.Scheme, tt.objects...)
+ ctx = authstatus.WithDuck(ctx)
+ r := resolver.NewAuthenticatableResolverFromTracker(ctx, tracker.New(func(types.NamespacedName) {}, 0))
+
+ ep := &v1alpha1.EventPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy",
+ Namespace: "my-ns",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ From: tt.froms,
+ },
+ }
+
+ got, gotErr := ResolveSubjects(r, ep)
+ if (gotErr != nil) != tt.wantErr {
+ t.Errorf("ResolveSubjects() error = %v, wantErr %v", gotErr, tt.wantErr)
+ return
+ }
+
+ if !cmp.Equal(got, tt.want) {
+ t.Errorf("Unexpected object (-want, +got) =\n%s", cmp.Diff(got, tt.want))
+ }
+ })
+ }
+}
+
+func TestSubjectContained(t *testing.T) {
+
+ tests := []struct {
+ name string
+ sub string
+ allowedSubs []string
+ want bool
+ }{
+ {
+ name: "simple 1:1 match",
+ sub: "system:serviceaccounts:my-ns:my-sa",
+ allowedSubs: []string{
+ "system:serviceaccounts:my-ns:my-sa",
+ },
+ want: true,
+ }, {
+ name: "simple 1:n match",
+ sub: "system:serviceaccounts:my-ns:my-sa",
+ allowedSubs: []string{
+ "system:serviceaccounts:my-ns:another-sa",
+ "system:serviceaccounts:my-ns:my-sa",
+ "system:serviceaccounts:my-ns:yet-another-sa",
+ },
+ want: true,
+ }, {
+ name: "pattern match (all)",
+ sub: "system:serviceaccounts:my-ns:my-sa",
+ allowedSubs: []string{
+ "*",
+ },
+ want: true,
+ }, {
+ name: "pattern match (namespace)",
+ sub: "system:serviceaccounts:my-ns:my-sa",
+ allowedSubs: []string{
+ "system:serviceaccounts:my-ns:*",
+ },
+ want: true,
+ }, {
+ name: "pattern match (different namespace)",
+ sub: "system:serviceaccounts:my-ns-2:my-sa",
+ allowedSubs: []string{
+ "system:serviceaccounts:my-ns:*",
+ },
+ want: false,
+ }, {
+ name: "pattern match (namespace prefix)",
+ sub: "system:serviceaccounts:my-ns:my-sa",
+ allowedSubs: []string{
+ "system:serviceaccounts:my-ns*",
+ },
+ want: true,
+ }, {
+ name: "pattern match (namespace prefix 2)",
+ sub: "system:serviceaccounts:my-ns-2:my-sa",
+ allowedSubs: []string{
+ "system:serviceaccounts:my-ns*",
+ },
+ want: true,
+ }, {
+ name: "pattern match (middle)",
+ sub: "system:serviceaccounts:my-ns:my-sa",
+ allowedSubs: []string{
+ "system:serviceaccounts:*:my-sa",
+ },
+ want: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := SubjectContained(tt.sub, tt.allowedSubs); got != tt.want {
+ t.Errorf("SubjectContained(%q, '%v') = %v, want %v", tt.sub, tt.allowedSubs, got, tt.want)
+ }
+ })
+ }
+}
From 18dfe3c0ac90b756330eeae2a375190f36e2cdc8 Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Thu, 20 Jun 2024 10:59:17 +0200
Subject: [PATCH 18/33] JobSink: Test OIDC support (#8000)
* JobSink: Test OIDC support
Signed-off-by: Pierangelo Di Pilato
* Fix tests and add prerequisites
Signed-off-by: Pierangelo Di Pilato
* Fix e2e source sent event assertions
Signed-off-by: Pierangelo Di Pilato
* Fix configmap watcher to watch config-features changes
Signed-off-by: Pierangelo Di Pilato
---------
Signed-off-by: Pierangelo Di Pilato
---
cmd/jobsink/main.go | 68 ++++++++++++----------
pkg/apis/feature/store.go | 4 +-
test/rekt/features/jobsink/jobsink.go | 83 +++++++++++++++++++++++++++
test/rekt/job_sink_test.go | 15 +++++
4 files changed, 139 insertions(+), 31 deletions(-)
diff --git a/cmd/jobsink/main.go b/cmd/jobsink/main.go
index bb4d8b6fcca..0f8c99646b4 100644
--- a/cmd/jobsink/main.go
+++ b/cmd/jobsink/main.go
@@ -103,18 +103,19 @@ func main() {
logger.Info("Starting the JobSink Ingress")
- featureStore := feature.NewStore(logging.FromContext(ctx).Named("feature-config-store"), func(name string, value interface{}) {})
+ featureStore := feature.NewStore(logging.FromContext(ctx).Named("feature-config-store"), func(name string, value interface{}) {
+ logger.Info("Updated", zap.String("name", name), zap.Any("value", value))
+ })
featureStore.WatchConfigs(configMapWatcher)
// Decorate contexts with the current state of the feature config.
ctxFunc := func(ctx context.Context) context.Context {
- return featureStore.ToContext(ctx)
+ return logging.WithLogger(featureStore.ToContext(ctx), sl)
}
h := &Handler{
k8s: kubeclient.Get(ctx),
lister: jobsink.Get(ctx).Lister(),
- logger: logger,
withContext: ctxFunc,
oidcTokenVerifier: auth.NewOIDCTokenVerifier(ctx),
}
@@ -135,6 +136,12 @@ func main() {
log.Fatal(err)
}
+ // configMapWatcher does not block, so start it first.
+ logger.Info("Starting ConfigMap watcher")
+ if err = configMapWatcher.Start(ctx.Done()); err != nil {
+ logger.Fatal("Failed to start ConfigMap watcher", zap.Error(err))
+ }
+
// Start informers and wait for them to sync.
logger.Info("Starting informers.")
if err := controller.StartInformers(ctx.Done(), informers...); err != nil {
@@ -153,26 +160,28 @@ func main() {
type Handler struct {
k8s kubernetes.Interface
lister sinkslister.JobSinkLister
- logger *zap.Logger
withContext func(ctx context.Context) context.Context
oidcTokenVerifier *auth.OIDCTokenVerifier
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ctx := h.withContext(r.Context())
+ logger := logging.FromContext(ctx).Desugar()
+
if r.Method == http.MethodGet {
- h.handleGet(w, r)
+ h.handleGet(ctx, w, r)
return
}
if r.Method != http.MethodPost {
- h.logger.Info("Unexpected HTTP method", zap.String("method", r.Method))
+ logger.Info("Unexpected HTTP method", zap.String("method", r.Method))
w.WriteHeader(http.StatusBadRequest)
return
}
parts := strings.Split(strings.TrimSuffix(r.RequestURI, "/"), "/")
if len(parts) != 3 {
- h.logger.Info("Malformed uri", zap.String("URI", r.RequestURI), zap.Any("parts", parts))
+ logger.Info("Malformed uri", zap.String("URI", r.RequestURI), zap.Any("parts", parts))
w.WriteHeader(http.StatusBadRequest)
return
}
@@ -182,21 +191,22 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
Name: parts[2],
}
- h.logger.Debug("Handling POST request", zap.String("URI", r.RequestURI))
+ logger.Debug("Handling POST request", zap.String("URI", r.RequestURI))
- ctx := h.withContext(r.Context())
features := feature.FromContext(ctx)
+ logger.Debug("features", zap.Any("features", features))
+
if features.IsOIDCAuthentication() {
- h.logger.Debug("OIDC authentication is enabled")
+ logger.Debug("OIDC authentication is enabled")
audience := auth.GetAudienceDirect(sinksv.SchemeGroupVersion.WithKind("JobSink"), ref.Namespace, ref.Name)
err := h.oidcTokenVerifier.VerifyJWTFromRequest(ctx, r, &audience, w)
if err != nil {
- h.logger.Warn("Error when validating the JWT token in the request", zap.Error(err))
+ logger.Warn("Error when validating the JWT token in the request", zap.Error(err))
return
}
- h.logger.Debug("Request contained a valid JWT. Continuing...")
+ logger.Debug("Request contained a valid JWT. Continuing...")
}
message := cehttp.NewMessageFromHttpRequest(r)
@@ -204,33 +214,33 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
event, err := binding.ToEvent(r.Context(), message)
if err != nil {
- h.logger.Warn("failed to extract event from request", zap.Error(err))
+ logger.Warn("failed to extract event from request", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
if err := event.Validate(); err != nil {
- h.logger.Info("failed to validate event from request", zap.Error(err))
+ logger.Info("failed to validate event from request", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
js, err := h.lister.JobSinks(ref.Namespace).Get(ref.Name)
if err != nil {
- h.logger.Warn("Failed to retrieve jobsink", zap.String("ref", ref.String()), zap.Error(err))
+ logger.Warn("Failed to retrieve jobsink", zap.String("ref", ref.String()), zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
id := toIdHashLabelValue(event.Source(), event.ID())
- h.logger.Debug("Getting job for event", zap.String("URI", r.RequestURI), zap.String("id", id))
+ logger.Debug("Getting job for event", zap.String("URI", r.RequestURI), zap.String("id", id))
jobs, err := h.k8s.BatchV1().Jobs(js.GetNamespace()).List(r.Context(), metav1.ListOptions{
LabelSelector: jobLabelSelector(ref, id),
Limit: 1,
})
if err != nil {
- h.logger.Warn("Failed to retrieve job", zap.Error(err))
+ logger.Warn("Failed to retrieve job", zap.Error(err))
w.WriteHeader(http.StatusInternalServerError)
return
}
@@ -242,14 +252,14 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
eventBytes, err := event.MarshalJSON()
if err != nil {
- h.logger.Info("Failed to marshal event", zap.Error(err))
+ logger.Info("Failed to marshal event", zap.Error(err))
w.WriteHeader(http.StatusInternalServerError)
return
}
jobName := kmeta.ChildName(ref.Name, id)
- h.logger.Debug("Creating secret for event", zap.String("URI", r.RequestURI), zap.String("jobName", jobName))
+ logger.Debug("Creating secret for event", zap.String("URI", r.RequestURI), zap.String("jobName", jobName))
jobSinkUID := js.GetUID()
@@ -280,14 +290,14 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
_, err = h.k8s.CoreV1().Secrets(ref.Namespace).Create(r.Context(), secret, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
- h.logger.Warn("Failed to create secret", zap.Error(err))
+ logger.Warn("Failed to create secret", zap.Error(err))
w.Header().Add("Reason", err.Error())
w.WriteHeader(http.StatusInternalServerError)
return
}
- h.logger.Debug("Creating job for event", zap.String("URI", r.RequestURI), zap.String("jobName", jobName))
+ logger.Debug("Creating job for event", zap.String("URI", r.RequestURI), zap.String("jobName", jobName))
job := js.Spec.Job.DeepCopy()
job.Name = jobName
@@ -339,7 +349,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
_, err = h.k8s.BatchV1().Jobs(ref.Namespace).Create(r.Context(), job, metav1.CreateOptions{})
if err != nil {
- h.logger.Warn("Failed to create job", zap.Error(err))
+ logger.Warn("Failed to create job", zap.Error(err))
w.Header().Add("Reason", err.Error())
w.WriteHeader(http.StatusInternalServerError)
@@ -350,10 +360,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusAccepted)
}
-func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
+func (h *Handler) handleGet(ctx context.Context, w http.ResponseWriter, r *http.Request) {
+ logger := logging.FromContext(ctx)
parts := strings.Split(strings.TrimSuffix(r.RequestURI, "/"), "/")
if len(parts) != 9 {
- h.logger.Info("Malformed uri", zap.String("URI", r.RequestURI))
+ logger.Info("Malformed uri", zap.String("URI", r.RequestURI))
w.WriteHeader(http.StatusBadRequest)
return
}
@@ -363,21 +374,20 @@ func (h *Handler) handleGet(w http.ResponseWriter, r *http.Request) {
Name: parts[4],
}
- h.logger.Debug("Handling GET request", zap.String("URI", r.RequestURI))
+ logger.Debug("Handling GET request", zap.String("URI", r.RequestURI))
- ctx := h.withContext(r.Context())
features := feature.FromContext(ctx)
if features.IsOIDCAuthentication() {
- h.logger.Debug("OIDC authentication is enabled")
+ logger.Debug("OIDC authentication is enabled")
audience := auth.GetAudienceDirect(sinksv.SchemeGroupVersion.WithKind("JobSink"), ref.Namespace, ref.Name)
err := h.oidcTokenVerifier.VerifyJWTFromRequest(ctx, r, &audience, w)
if err != nil {
- h.logger.Warn("Error when validating the JWT token in the request", zap.Error(err))
+ logger.Warn("Error when validating the JWT token in the request", zap.Error(err))
return
}
- h.logger.Debug("Request contained a valid JWT. Continuing...")
+ logger.Debug("Request contained a valid JWT. Continuing...")
}
eventSource := parts[6]
diff --git a/pkg/apis/feature/store.go b/pkg/apis/feature/store.go
index a22f313c232..8285f786292 100644
--- a/pkg/apis/feature/store.go
+++ b/pkg/apis/feature/store.go
@@ -40,12 +40,12 @@ func FromContext(ctx context.Context) Flags {
}
// FromContextOrDefaults is like FromContext, but when no Flags is attached it
-// returns an empty Flags.
+// returns the default Flags.
func FromContextOrDefaults(ctx context.Context) Flags {
if cfg := FromContext(ctx); cfg != nil {
return cfg
}
- return Flags{}
+ return newDefaults()
}
// ToContext attaches the provided Flags to the provided context, returning the
diff --git a/test/rekt/features/jobsink/jobsink.go b/test/rekt/features/jobsink/jobsink.go
index 3495621d2d3..82bac2849d0 100644
--- a/test/rekt/features/jobsink/jobsink.go
+++ b/test/rekt/features/jobsink/jobsink.go
@@ -25,6 +25,7 @@ import (
"github.com/google/uuid"
batchv1 "k8s.io/api/batch/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"knative.dev/pkg/apis"
kubeclient "knative.dev/pkg/client/injection/kube/client"
@@ -35,6 +36,7 @@ import (
"knative.dev/reconciler-test/pkg/k8s"
"knative.dev/eventing/pkg/apis/sinks"
+ "knative.dev/eventing/pkg/auth"
"knative.dev/eventing/test/rekt/features/featureflags"
"knative.dev/eventing/test/rekt/resources/addressable"
"knative.dev/eventing/test/rekt/resources/jobsink"
@@ -66,6 +68,11 @@ func Success() *feature.Feature {
MatchReceivedEvent(cetest.HasId(event.ID())).
AtLeast(1),
)
+ f.Assert("Source sent the event", assert.OnStore(source).
+ Match(assert.MatchKind(eventshub.EventResponse)).
+ Match(assert.MatchStatusCode(202)).
+ AtLeast(1),
+ )
f.Assert("At least one Job is complete", AtLeastOneJobIsComplete(jobSink))
return f
@@ -84,6 +91,7 @@ func SuccessTLS() *feature.Feature {
event.SetID(uuid.NewString())
f.Prerequisite("transport encryption is strict", featureflags.TransportEncryptionStrict())
+ f.Prerequisite("should not run when Istio is enabled", featureflags.IstioDisabled())
f.Setup("install forwarder sink", eventshub.Install(sink, eventshub.StartReceiver))
f.Setup("install job sink", jobsink.Install(jobSink, jobsink.WithForwarderJob(sinkURL.String())))
@@ -100,6 +108,81 @@ func SuccessTLS() *feature.Feature {
MatchReceivedEvent(cetest.HasId(event.ID())).
AtLeast(1),
)
+ f.Assert("Source sent the event", assert.OnStore(source).
+ Match(assert.MatchKind(eventshub.EventResponse)).
+ Match(assert.MatchStatusCode(202)).
+ AtLeast(1),
+ )
+ f.Assert("At least one Job is complete", AtLeastOneJobIsComplete(jobSink))
+
+ return f
+}
+
+func OIDC() *feature.Feature {
+ f := feature.NewFeature()
+
+ sink := feature.MakeRandomK8sName("sink")
+ jobSink := feature.MakeRandomK8sName("jobsink")
+ source := feature.MakeRandomK8sName("source")
+ sourceNoAudience := feature.MakeRandomK8sName("source-no-audience")
+
+ sinkURL := &apis.URL{Scheme: "http", Host: sink}
+
+ event := cetest.FullEvent()
+ event.SetID(uuid.NewString())
+
+ eventNoAudience := cetest.FullEvent()
+ eventNoAudience.SetID(uuid.NewString())
+
+ f.Prerequisite("OIDC authentication is enabled", featureflags.AuthenticationOIDCEnabled())
+ f.Prerequisite("transport encryption is strict", featureflags.TransportEncryptionStrict())
+ f.Prerequisite("should not run when Istio is enabled", featureflags.IstioDisabled())
+
+ f.Setup("install forwarder sink", eventshub.Install(sink, eventshub.StartReceiver))
+ f.Setup("install job sink", jobsink.Install(jobSink, jobsink.WithForwarderJob(sinkURL.String())))
+
+ f.Setup("jobsink is addressable", jobsink.IsAddressable(jobSink))
+ f.Setup("jobsink is ready", jobsink.IsAddressable(jobSink))
+
+ f.Requirement("install source", eventshub.Install(source,
+ eventshub.StartSenderToResource(jobsink.GVR(), jobSink),
+ eventshub.InputEvent(event)))
+
+ f.Requirement("install source no audience", func(ctx context.Context, t feature.T) {
+ addr, err := jobsink.Address(ctx, jobSink)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ eventshub.Install(sourceNoAudience,
+ eventshub.StartSenderURLTLS(addr.URL.String(), addr.CACerts),
+ eventshub.InputEvent(eventNoAudience))(ctx, t)
+ })
+
+ f.Assert("JobSink has audience in address", func(ctx context.Context, t feature.T) {
+ gvk := schema.GroupVersionKind{
+ Group: jobsink.GVR().Group,
+ Version: jobsink.GVR().Version,
+ Kind: "JobSink",
+ }
+ addressable.ValidateAddress(jobsink.GVR(), jobSink, addressable.AssertAddressWithAudience(
+ auth.GetAudienceDirect(gvk, environment.FromContext(ctx).Namespace(), jobSink)),
+ )(ctx, t)
+ })
+ f.Assert("Source sent the event with audience", assert.OnStore(source).
+ Match(assert.MatchKind(eventshub.EventResponse)).
+ Match(assert.MatchStatusCode(202)).
+ AtLeast(1),
+ )
+ f.Assert("Source sent the event without audience", assert.OnStore(sourceNoAudience).
+ Match(assert.MatchKind(eventshub.EventResponse)).
+ Match(assert.MatchStatusCode(401)).
+ AtLeast(1),
+ )
+ f.Assert("Job is created with the mounted event", assert.OnStore(sink).
+ MatchReceivedEvent(cetest.HasId(event.ID())).
+ AtLeast(1),
+ )
f.Assert("At least one Job is complete", AtLeastOneJobIsComplete(jobSink))
return f
diff --git a/test/rekt/job_sink_test.go b/test/rekt/job_sink_test.go
index d8436c949eb..69fa9e02263 100644
--- a/test/rekt/job_sink_test.go
+++ b/test/rekt/job_sink_test.go
@@ -59,3 +59,18 @@ func TestJobSinkSuccessTLS(t *testing.T) {
env.Test(ctx, t, jobsink.SuccessTLS())
}
+
+func TestJobSinkOIDC(t *testing.T) {
+ t.Parallel()
+
+ ctx, env := global.Environment(
+ knative.WithKnativeNamespace(system.Namespace()),
+ knative.WithLoggingConfig,
+ knative.WithTracingConfig,
+ k8s.WithEventListener,
+ eventshub.WithTLS(t),
+ environment.Managed(t),
+ )
+
+ env.Test(ctx, t, jobsink.OIDC())
+}
From f4df40f42bf2b4a48fe62761b3efa64205433657 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20St=C3=A4bler?=
Date: Thu, 20 Jun 2024 17:54:18 +0200
Subject: [PATCH 19/33] List applying policies in InMemoryChannels status
(#8011)
* List applying EventPolicies in InMemoryChannels status
* Add & update integration tests
* Reconcile IMC only on relevant EventPolicy changes
* Only list ready EventPolicies in .status.policies
* Make reason for not being ready a bit more descriptive (NotReady -> EventPoliciesNotReady)
* Make EventPolicy EventHandler generic
* Move EventPolicyEventHandler to auth package
* Refactor to use EventPolicy helper functions from reconciler/testing package
* Add unit test for GetApplyingResourcesOfEventPolicyForGK
* Fix linter issues
* Match everything in namespace on empty .spec.to
* GetApplyingResourcesOfEventPolicyForGK should return list of unique elements
* Added very basic unit test for EventPolicyEventHandler
---
.../roles/controller-clusterrole.yaml | 8 +
.../v1/in_memory_channel_lifecycle.go | 21 +
.../v1/in_memory_channel_lifecycle_test.go | 34 ++
pkg/auth/event_policy.go | 142 +++++++
pkg/auth/event_policy_test.go | 370 ++++++++++++++++++
pkg/reconciler/channel/channel_test.go | 12 +-
.../inmemorychannel/controller/controller.go | 15 +-
.../controller/controller_test.go | 1 +
.../controller/inmemorychannel.go | 45 +++
.../controller/inmemorychannel_test.go | 165 +++++++-
.../dispatcher/inmemorychannel_test.go | 23 +-
.../dispatcher/readiness_test.go | 1 +
pkg/reconciler/testing/v1/eventpolicy.go | 101 +++++
pkg/reconciler/testing/v1/inmemorychannel.go | 39 ++
pkg/reconciler/testing/v1/listers.go | 6 +
15 files changed, 958 insertions(+), 25 deletions(-)
create mode 100644 pkg/reconciler/testing/v1/eventpolicy.go
diff --git a/config/channels/in-memory-channel/roles/controller-clusterrole.yaml b/config/channels/in-memory-channel/roles/controller-clusterrole.yaml
index 9e1ab6812b4..6164e834f41 100644
--- a/config/channels/in-memory-channel/roles/controller-clusterrole.yaml
+++ b/config/channels/in-memory-channel/roles/controller-clusterrole.yaml
@@ -45,6 +45,14 @@ rules:
- inmemorychannels
verbs:
- patch
+ - apiGroups:
+ - eventing.knative.dev
+ resources:
+ - eventpolicies
+ verbs:
+ - get
+ - list
+ - watch
- apiGroups:
- ""
resources:
diff --git a/pkg/apis/messaging/v1/in_memory_channel_lifecycle.go b/pkg/apis/messaging/v1/in_memory_channel_lifecycle.go
index 6be9e29f338..3b6441a305a 100644
--- a/pkg/apis/messaging/v1/in_memory_channel_lifecycle.go
+++ b/pkg/apis/messaging/v1/in_memory_channel_lifecycle.go
@@ -33,6 +33,7 @@ var imcCondSet = apis.NewLivingConditionSet(
InMemoryChannelConditionAddressable,
InMemoryChannelConditionChannelServiceReady,
InMemoryChannelConditionDeadLetterSinkResolved,
+ InMemoryChannelConditionEventPoliciesReady,
)
const (
@@ -64,6 +65,10 @@ const (
// InMemoryChannelConditionDeadLetterSinkResolved has status True when there is a Dead Letter Sink ref or URI
// defined in the Spec.Delivery, is a valid destination and its correctly resolved into a valid URI
InMemoryChannelConditionDeadLetterSinkResolved apis.ConditionType = "DeadLetterSinkResolved"
+
+ // InMemoryChannelConditionEventPoliciesReady has status True when all the applying EventPolicies for this
+ // InMemoryChannel are ready.
+ InMemoryChannelConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
)
// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
@@ -182,3 +187,19 @@ func (imcs *InMemoryChannelStatus) MarkDeadLetterSinkResolvedFailed(reason, mess
imcs.DeliveryStatus = eventingduck.DeliveryStatus{}
imcCondSet.Manage(imcs).MarkFalse(InMemoryChannelConditionDeadLetterSinkResolved, reason, messageFormat, messageA...)
}
+
+func (imcs *InMemoryChannelStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkFalse(InMemoryChannelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkUnknown(InMemoryChannelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkEventPoliciesTrue() {
+ imcCondSet.Manage(imcs).MarkTrue(InMemoryChannelConditionEventPoliciesReady)
+}
+
+func (imcs *InMemoryChannelStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkTrueWithReason(InMemoryChannelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
diff --git a/pkg/apis/messaging/v1/in_memory_channel_lifecycle_test.go b/pkg/apis/messaging/v1/in_memory_channel_lifecycle_test.go
index 7c97a86ed21..f1785342672 100644
--- a/pkg/apis/messaging/v1/in_memory_channel_lifecycle_test.go
+++ b/pkg/apis/messaging/v1/in_memory_channel_lifecycle_test.go
@@ -137,6 +137,9 @@ func TestInMemoryChannelInitializeConditions(t *testing.T) {
}, {
Type: InMemoryChannelConditionEndpointsReady,
Status: corev1.ConditionUnknown,
+ }, {
+ Type: InMemoryChannelConditionEventPoliciesReady,
+ Status: corev1.ConditionUnknown,
}, {
Type: InMemoryChannelConditionReady,
Status: corev1.ConditionUnknown,
@@ -177,6 +180,9 @@ func TestInMemoryChannelInitializeConditions(t *testing.T) {
}, {
Type: InMemoryChannelConditionEndpointsReady,
Status: corev1.ConditionUnknown,
+ }, {
+ Type: InMemoryChannelConditionEventPoliciesReady,
+ Status: corev1.ConditionUnknown,
}, {
Type: InMemoryChannelConditionReady,
Status: corev1.ConditionUnknown,
@@ -217,6 +223,9 @@ func TestInMemoryChannelInitializeConditions(t *testing.T) {
}, {
Type: InMemoryChannelConditionEndpointsReady,
Status: corev1.ConditionUnknown,
+ }, {
+ Type: InMemoryChannelConditionEventPoliciesReady,
+ Status: corev1.ConditionUnknown,
}, {
Type: InMemoryChannelConditionReady,
Status: corev1.ConditionUnknown,
@@ -244,6 +253,7 @@ func TestInMemoryChannelIsReady(t *testing.T) {
name string
markServiceReady bool
markChannelServiceReady bool
+ markEventPolicyReady bool
setAddress bool
markEndpointsReady bool
DLSResolved *bool
@@ -253,6 +263,7 @@ func TestInMemoryChannelIsReady(t *testing.T) {
name: "all happy",
markServiceReady: true,
markChannelServiceReady: true,
+ markEventPolicyReady: true,
markEndpointsReady: true,
dispatcherStatus: deploymentStatusReady,
setAddress: true,
@@ -262,6 +273,7 @@ func TestInMemoryChannelIsReady(t *testing.T) {
name: "service not ready",
markServiceReady: false,
markChannelServiceReady: false,
+ markEventPolicyReady: true,
markEndpointsReady: true,
dispatcherStatus: deploymentStatusReady,
setAddress: true,
@@ -271,6 +283,7 @@ func TestInMemoryChannelIsReady(t *testing.T) {
name: "endpoints not ready",
markServiceReady: true,
markChannelServiceReady: false,
+ markEventPolicyReady: true,
markEndpointsReady: false,
dispatcherStatus: deploymentStatusReady,
setAddress: true,
@@ -281,6 +294,7 @@ func TestInMemoryChannelIsReady(t *testing.T) {
markServiceReady: true,
markEndpointsReady: true,
markChannelServiceReady: false,
+ markEventPolicyReady: true,
dispatcherStatus: deploymentStatusNotReady,
setAddress: true,
wantReady: false,
@@ -289,6 +303,7 @@ func TestInMemoryChannelIsReady(t *testing.T) {
name: "address not set",
markServiceReady: true,
markChannelServiceReady: false,
+ markEventPolicyReady: true,
markEndpointsReady: true,
dispatcherStatus: deploymentStatusReady,
setAddress: false,
@@ -298,6 +313,7 @@ func TestInMemoryChannelIsReady(t *testing.T) {
name: "channel service not ready",
markServiceReady: true,
markChannelServiceReady: false,
+ markEventPolicyReady: true,
markEndpointsReady: true,
dispatcherStatus: deploymentStatusReady,
setAddress: true,
@@ -307,6 +323,7 @@ func TestInMemoryChannelIsReady(t *testing.T) {
name: "dls sad",
markServiceReady: true,
markChannelServiceReady: false,
+ markEventPolicyReady: true,
markEndpointsReady: true,
dispatcherStatus: deploymentStatusReady,
setAddress: true,
@@ -316,6 +333,17 @@ func TestInMemoryChannelIsReady(t *testing.T) {
name: "dls not configured",
markServiceReady: true,
markChannelServiceReady: false,
+ markEventPolicyReady: true,
+ markEndpointsReady: true,
+ dispatcherStatus: deploymentStatusReady,
+ setAddress: true,
+ wantReady: false,
+ DLSResolved: &trueVal,
+ }, {
+ name: "EventPolicy not ready",
+ markServiceReady: true,
+ markChannelServiceReady: true,
+ markEventPolicyReady: false,
markEndpointsReady: true,
dispatcherStatus: deploymentStatusReady,
setAddress: true,
@@ -336,6 +364,11 @@ func TestInMemoryChannelIsReady(t *testing.T) {
} else {
cs.MarkChannelServiceFailed("NotReadyChannelService", "testing")
}
+ if test.markEventPolicyReady {
+ cs.MarkEventPoliciesTrue()
+ } else {
+ cs.MarkEndpointsFailed("NotReadyEventPolicy", "testing")
+ }
if test.setAddress {
cs.SetAddress(&duckv1.Addressable{URL: &apis.URL{Scheme: "http", Host: "foo.bar"}})
}
@@ -437,6 +470,7 @@ func TestInMemoryChannelStatus_SetAddressable(t *testing.T) {
func ReadyBrokerStatusWithoutDLS() *InMemoryChannelStatus {
imcs := &InMemoryChannelStatus{}
imcs.MarkChannelServiceTrue()
+ imcs.MarkEventPoliciesTrue()
imcs.MarkDeadLetterSinkNotConfigured()
imcs.MarkEndpointsTrue()
imcs.SetAddress(&duckv1.Addressable{URL: apis.HTTP("example.com")})
diff --git a/pkg/auth/event_policy.go b/pkg/auth/event_policy.go
index 26efd163409..e049772f135 100644
--- a/pkg/auth/event_policy.go
+++ b/pkg/auth/event_policy.go
@@ -20,6 +20,9 @@ import (
"fmt"
"strings"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/tools/cache"
+
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -86,6 +89,71 @@ func GetEventPoliciesForResource(lister listerseventingv1alpha1.EventPolicyListe
return relevantPolicies, nil
}
+// GetApplyingResourcesOfEventPolicyForGK returns the names of all resources of the given GroupKind
+// to which the given event policy applies. Only names are returned, as the resources are in the
+// same namespace as the event policy.
+// This function is the "inverse" of GetEventPoliciesForResource.
+func GetApplyingResourcesOfEventPolicyForGK(eventPolicy *v1alpha1.EventPolicy, gk schema.GroupKind, gkIndexer cache.Indexer) ([]string, error) {
+ applyingResources := map[string]struct{}{}
+
+ if eventPolicy.Spec.To == nil {
+ // empty .spec.to matches everything in namespace
+
+ err := cache.ListAllByNamespace(gkIndexer, eventPolicy.Namespace, labels.Everything(), func(i interface{}) {
+ name := i.(metav1.Object).GetName()
+ applyingResources[name] = struct{}{}
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to list all %s %s resources in %s: %w", gk.Group, gk.Kind, eventPolicy.Namespace, err)
+ }
+ } else {
+ for _, to := range eventPolicy.Spec.To {
+ if to.Ref != nil {
+ toGV, err := schema.ParseGroupVersion(to.Ref.APIVersion)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse group version of %q: %w", to.Ref.APIVersion, err)
+ }
+
+ if strings.EqualFold(toGV.Group, gk.Group) &&
+ strings.EqualFold(to.Ref.Kind, gk.Kind) {
+
+ applyingResources[to.Ref.Name] = struct{}{}
+ }
+ }
+
+ if to.Selector != nil {
+ selectorGV, err := schema.ParseGroupVersion(to.Selector.APIVersion)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse group version of %q: %w", to.Selector.APIVersion, err)
+ }
+
+ if strings.EqualFold(selectorGV.Group, gk.Group) &&
+ strings.EqualFold(to.Selector.Kind, gk.Kind) {
+
+ selector, err := metav1.LabelSelectorAsSelector(to.Selector.LabelSelector)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse label selector %v: %w", to.Selector.LabelSelector, err)
+ }
+
+ err = cache.ListAllByNamespace(gkIndexer, eventPolicy.Namespace, selector, func(i interface{}) {
+ name := i.(metav1.Object).GetName()
+ applyingResources[name] = struct{}{}
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not list resources of GK in %q namespace for selector %v: %w", eventPolicy.Namespace, selector, err)
+ }
+ }
+ }
+ }
+ }
+
+ res := []string{}
+ for name := range applyingResources {
+ res = append(res, name)
+ }
+ return res, nil
+}
+
// ResolveSubjects returns the OIDC service accounts names for the objects referenced in the EventPolicySpecFrom.
func ResolveSubjects(resolver *resolver.AuthenticatableResolver, eventPolicy *v1alpha1.EventPolicy) ([]string, error) {
allSAs := []string{}
@@ -145,3 +213,77 @@ func SubjectContained(sub string, allowedSubs []string) bool {
return false
}
+
+func handleApplyingResourcesOfEventPolicy(eventPolicy *v1alpha1.EventPolicy, gk schema.GroupKind, indexer cache.Indexer, handlerFn func(key types.NamespacedName) error) error {
+ applyingResources, err := GetApplyingResourcesOfEventPolicyForGK(eventPolicy, gk, indexer)
+ if err != nil {
+ return fmt.Errorf("could not get applying resources of eventpolicy: %w", err)
+ }
+
+ for _, resourceName := range applyingResources {
+ err := handlerFn(types.NamespacedName{
+ Namespace: eventPolicy.Namespace,
+ Name: resourceName,
+ })
+
+ if err != nil {
+ return fmt.Errorf("could not handle resource %q: %w", resourceName, err)
+ }
+ }
+
+ return nil
+}
+
+// EventPolicyEventHandler returns a ResourceEventHandler, which passes the referencing resources of the EventPolicy
+// to the enqueueFn if the EventPolicy was referencing or got updated and now is referencing the resource of the given GroupKind.
+func EventPolicyEventHandler(indexer cache.Indexer, gk schema.GroupKind, enqueueFn func(key types.NamespacedName)) cache.ResourceEventHandler {
+ return cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ eventPolicy, ok := obj.(*v1alpha1.EventPolicy)
+ if !ok {
+ return
+ }
+
+ handleApplyingResourcesOfEventPolicy(eventPolicy, gk, indexer, func(key types.NamespacedName) error {
+ enqueueFn(key)
+ return nil
+ })
+ },
+ UpdateFunc: func(oldObj, newObj interface{}) {
+		// Here we need to check if the old or the new EventPolicy was referencing the given GroupKind
+ oldEventPolicy, ok := oldObj.(*v1alpha1.EventPolicy)
+ if !ok {
+ return
+ }
+ newEventPolicy, ok := newObj.(*v1alpha1.EventPolicy)
+ if !ok {
+ return
+ }
+
+			// make sure we handle the keys only once
+ toHandle := map[types.NamespacedName]struct{}{}
+ addToHandleList := func(key types.NamespacedName) error {
+ toHandle[key] = struct{}{}
+ return nil
+ }
+
+ handleApplyingResourcesOfEventPolicy(oldEventPolicy, gk, indexer, addToHandleList)
+ handleApplyingResourcesOfEventPolicy(newEventPolicy, gk, indexer, addToHandleList)
+
+ for k := range toHandle {
+ enqueueFn(k)
+ }
+ },
+ DeleteFunc: func(obj interface{}) {
+ eventPolicy, ok := obj.(*v1alpha1.EventPolicy)
+ if !ok {
+ return
+ }
+
+ handleApplyingResourcesOfEventPolicy(eventPolicy, gk, indexer, func(key types.NamespacedName) error {
+ enqueueFn(key)
+ return nil
+ })
+ },
+ }
+}
diff --git a/pkg/auth/event_policy_test.go b/pkg/auth/event_policy_test.go
index 64f972c7ba6..124f1423173 100644
--- a/pkg/auth/event_policy_test.go
+++ b/pkg/auth/event_policy_test.go
@@ -18,9 +18,12 @@ package auth
import (
"context"
+ "reflect"
"strings"
"testing"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
"github.com/google/go-cmp/cmp"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -30,6 +33,7 @@ import (
"knative.dev/eventing/pkg/apis/eventing/v1alpha1"
sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
"knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+ brokerinformerfake "knative.dev/eventing/pkg/client/injection/informers/eventing/v1/broker/fake"
eventpolicyinformerfake "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/fake"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/client/injection/ducks/duck/v1/authstatus"
@@ -764,3 +768,369 @@ func TestSubjectContained(t *testing.T) {
})
}
}
+
+func TestGetApplyingResourcesOfEventPolicyForGK(t *testing.T) {
+ tests := []struct {
+ name string
+ eventPolicySpecTo []v1alpha1.EventPolicySpecTo
+ gk schema.GroupKind
+ brokerObjects []*eventingv1.Broker
+ want []string
+ wantErr bool
+ }{
+ {
+ name: "Returns resource from direct reference",
+ eventPolicySpecTo: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ APIVersion: eventingv1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ Name: "my-broker",
+ },
+ },
+ },
+ gk: schema.GroupKind{
+ Group: "eventing.knative.dev",
+ Kind: "Broker",
+ },
+			brokerObjects: []*eventingv1.Broker{}, // for a direct reference, we don't need the indexer later
+ want: []string{
+ "my-broker",
+ },
+ }, {
+ name: "Ignores resources of other Group&Kind in direct reference",
+ eventPolicySpecTo: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ APIVersion: eventingv1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ Name: "my-broker",
+ },
+ }, {
+ Ref: &v1alpha1.EventPolicyToReference{
+ APIVersion: eventingv1.SchemeGroupVersion.String(),
+ Kind: "Another-Kind",
+ Name: "another-res",
+ },
+ },
+ },
+ gk: schema.GroupKind{
+ Group: "eventing.knative.dev",
+ Kind: "Broker",
+ },
+ brokerObjects: []*eventingv1.Broker{},
+ want: []string{
+ "my-broker",
+ },
+ }, {
+ name: "Returns object which match selector",
+ eventPolicySpecTo: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ APIVersion: eventingv1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ },
+ },
+ },
+ },
+ gk: schema.GroupKind{
+ Group: "eventing.knative.dev",
+ Kind: "Broker",
+ },
+ brokerObjects: []*eventingv1.Broker{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-ns",
+ Labels: map[string]string{
+ "key": "value",
+ },
+ },
+ }, {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-other-broker",
+ Namespace: "my-ns",
+ Labels: map[string]string{
+ "other-key": "other-value",
+ },
+ },
+ },
+ },
+ want: []string{
+ "my-broker",
+ },
+ }, {
+ name: "Checks on GKs on selector match",
+ eventPolicySpecTo: []v1alpha1.EventPolicySpecTo{
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ APIVersion: eventingv1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ },
+ },
+ },
+ },
+ gk: schema.GroupKind{
+ Group: "eventing.knative.dev",
+ Kind: "Other-Kind",
+ },
+ brokerObjects: []*eventingv1.Broker{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-ns",
+ Labels: map[string]string{
+ "key": "value",
+ },
+ },
+ },
+ },
+ want: []string{},
+ }, {
+ name: "Empty .spec.to matches everything in namespace",
+ eventPolicySpecTo: nil,
+ gk: schema.GroupKind{
+ Group: "eventing.knative.dev",
+ Kind: "Broker",
+ },
+ brokerObjects: []*eventingv1.Broker{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-ns",
+ Labels: map[string]string{
+ "key": "value",
+ },
+ },
+ },
+ },
+ want: []string{
+ "my-broker",
+ },
+ }, {
+ name: "Returns elements only once in slice",
+ eventPolicySpecTo: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ APIVersion: eventingv1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ Name: "my-broker",
+ },
+ },
+ {
+ Selector: &v1alpha1.EventPolicySelector{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "key": "value",
+ },
+ },
+ TypeMeta: &metav1.TypeMeta{
+ APIVersion: eventingv1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ },
+ },
+ },
+ },
+ gk: schema.GroupKind{
+ Group: "eventing.knative.dev",
+ Kind: "Broker",
+ },
+ brokerObjects: []*eventingv1.Broker{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-broker",
+ Namespace: "my-ns",
+ Labels: map[string]string{
+ "key": "value",
+ },
+ },
+ },
+ },
+ want: []string{
+ "my-broker",
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx, _ := reconcilertesting.SetupFakeContext(t)
+
+ brokerIndexer := brokerinformerfake.Get(ctx).Informer().GetIndexer()
+ for _, b := range tt.brokerObjects {
+ err := brokerIndexer.Add(b)
+ if err != nil {
+ t.Fatalf("could not add broker object to indexer: %v", err)
+ }
+ }
+
+ eventPolicy := &v1alpha1.EventPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy",
+ Namespace: "my-ns",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: tt.eventPolicySpecTo,
+ },
+ }
+
+ got, err := GetApplyingResourcesOfEventPolicyForGK(eventPolicy, tt.gk, brokerIndexer)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("GetApplyingResourcesOfEventPolicyForGK() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("GetApplyingResourcesOfEventPolicyForGK() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestEventPolicyEventHandler_AddAndDelete(t *testing.T) {
+ eventPolicy := &v1alpha1.EventPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy",
+ Namespace: "my-ns",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ APIVersion: eventingv1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ Name: "my-broker",
+ },
+ },
+ },
+ },
+ }
+
+ gk := schema.GroupKind{
+ Group: eventingv1.SchemeGroupVersion.Group,
+ Kind: "Broker",
+ }
+
+ wantCalls := []string{
+ "my-broker",
+ }
+
+ calls := map[string]int{}
+ callbackFn := func(key types.NamespacedName) {
+ calls[key.Name]++
+ }
+
+ handler := EventPolicyEventHandler(nil, gk, callbackFn)
+ handler.OnAdd(eventPolicy, false)
+
+ if len(calls) != len(wantCalls) {
+ t.Errorf("EventPolicyEventHandler() callback in ADD was called on wrong resources. Want to be called on %v, but was called on %v", wantCalls, calls)
+ }
+ for _, wantCallForResource := range wantCalls {
+ num, ok := calls[wantCallForResource]
+ if !ok {
+ t.Errorf("EventPolicyEventHandler() callback in ADD was called on %s 0 times. Expected to be called only once", wantCallForResource)
+ }
+
+ if num != 1 {
+ t.Errorf("EventPolicyEventHandler() callback in ADD was called on %s %d times. Expected to be called only once", wantCallForResource, num)
+ }
+ }
+
+ // do the same for OnDelete
+ calls = map[string]int{}
+ handler.OnDelete(eventPolicy)
+
+ if len(calls) != len(wantCalls) {
+ t.Errorf("EventPolicyEventHandler() callback in DELETE was called on wrong resources. Want to be called on %v, but was called on %v", wantCalls, calls)
+ }
+ for _, wantCallForResource := range wantCalls {
+ num, ok := calls[wantCallForResource]
+ if !ok {
+ t.Errorf("EventPolicyEventHandler() callback in DELETE was called on %s 0 times. Expected to be called only once", wantCallForResource)
+ }
+
+ if num != 1 {
+ t.Errorf("EventPolicyEventHandler() callback in DELETE was called on %s %d times. Expected to be called only once", wantCallForResource, num)
+ }
+ }
+
+}
+
+func TestEventPolicyEventHandler_Update(t *testing.T) {
+ oldEventPolicy := &v1alpha1.EventPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy",
+ Namespace: "my-ns",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ APIVersion: eventingv1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ Name: "my-broker",
+ },
+ },
+ },
+ },
+ }
+ newEventPolicy := &v1alpha1.EventPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-policy",
+ Namespace: "my-ns",
+ },
+ Spec: v1alpha1.EventPolicySpec{
+ To: []v1alpha1.EventPolicySpecTo{
+ {
+ Ref: &v1alpha1.EventPolicyToReference{
+ APIVersion: eventingv1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ Name: "my-broker",
+ },
+ },
+ },
+ },
+ }
+
+ gk := schema.GroupKind{
+ Group: eventingv1.SchemeGroupVersion.Group,
+ Kind: "Broker",
+ }
+
+ wantCalls := []string{
+ "my-broker",
+ }
+
+ calls := map[string]int{}
+ callbackFn := func(key types.NamespacedName) {
+ calls[key.Name]++
+ }
+
+ handler := EventPolicyEventHandler(nil, gk, callbackFn)
+ handler.OnUpdate(oldEventPolicy, newEventPolicy)
+
+ if len(calls) != len(wantCalls) {
+ t.Errorf("EventPolicyEventHandler() callback in UPDATE was called on wrong resources. Want to be called on %v, but was called on %v", wantCalls, calls)
+ }
+ for _, wantCallForResource := range wantCalls {
+ num, ok := calls[wantCallForResource]
+ if !ok {
+ t.Errorf("EventPolicyEventHandler() callback in UPDATE was called on %s 0 times. Expected to be called only once", wantCallForResource)
+ }
+
+ if num != 1 {
+ t.Errorf("EventPolicyEventHandler() callback in UPDATE was called on %s %d times. Expected to be called only once", wantCallForResource, num)
+ }
+ }
+}
diff --git a/pkg/reconciler/channel/channel_test.go b/pkg/reconciler/channel/channel_test.go
index fe06a2dd633..91e0e469ffd 100644
--- a/pkg/reconciler/channel/channel_test.go
+++ b/pkg/reconciler/channel/channel_test.go
@@ -135,7 +135,8 @@ func TestReconcile(t *testing.T) {
WithInMemoryChannelEndpointsReady(),
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelAddress(backingChannelAddressable),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewChannel(channelName, testNS,
@@ -165,7 +166,8 @@ func TestReconcile(t *testing.T) {
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelSubscribers(subscribers()),
WithInMemoryChannelAddress(backingChannelAddressable),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
},
}, {
Name: "Backing channel created",
@@ -259,7 +261,8 @@ func TestReconcile(t *testing.T) {
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelSubscribers(subscribers()),
WithInMemoryChannelAddress(backingChannelAddressable),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewChannel(channelName, testNS,
@@ -293,7 +296,8 @@ func TestReconcile(t *testing.T) {
WithInMemoryChannelAddress(backingChannelAddressable),
WithInMemoryChannelSubscribers(subscribers()),
WithInMemoryChannelStatusSubscribers(subscriberStatuses()),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewChannel(channelName, testNS,
diff --git a/pkg/reconciler/inmemorychannel/controller/controller.go b/pkg/reconciler/inmemorychannel/controller/controller.go
index e60a5414e7b..2102b3728e7 100644
--- a/pkg/reconciler/inmemorychannel/controller/controller.go
+++ b/pkg/reconciler/inmemorychannel/controller/controller.go
@@ -19,18 +19,21 @@ package controller
import (
"context"
- kubeclient "knative.dev/pkg/client/injection/kube/client"
- "knative.dev/pkg/logging"
+ "knative.dev/eventing/pkg/auth"
"github.com/kelseyhightower/envconfig"
"k8s.io/client-go/tools/cache"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
+ "knative.dev/pkg/logging"
"knative.dev/pkg/system"
"knative.dev/pkg/resolver"
"knative.dev/eventing/pkg/apis/feature"
+ "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy"
"knative.dev/eventing/pkg/client/injection/informers/messaging/v1/inmemorychannel"
inmemorychannelreconciler "knative.dev/eventing/pkg/client/injection/reconciler/messaging/v1/inmemorychannel"
"knative.dev/eventing/pkg/eventingtls"
@@ -65,6 +68,7 @@ func NewController(
serviceAccountInformer := serviceaccount.Get(ctx)
roleBindingInformer := rolebinding.Get(ctx)
secretInformer := secretinformer.Get(ctx)
+ eventPolicyInformer := eventpolicy.Get(ctx)
r := &Reconciler{
kubeClientSet: kubeclient.Get(ctx),
@@ -75,6 +79,7 @@ func NewController(
serviceAccountLister: serviceAccountInformer.Lister(),
roleBindingLister: roleBindingInformer.Lister(),
secretLister: secretInformer.Lister(),
+ eventPolicyLister: eventPolicyInformer.Lister(),
}
env := &envConfig{}
@@ -140,6 +145,12 @@ func NewController(
Handler: controller.HandleAll(globalResync),
})
+ imcGK := messagingv1.SchemeGroupVersion.WithKind("InMemoryChannel").GroupKind()
+
+ // Enqueue the InMemoryChannel, if we have an EventPolicy which was referencing
+ // or got updated and now is referencing the InMemoryChannel
+ eventPolicyInformer.Informer().AddEventHandler(auth.EventPolicyEventHandler(inmemorychannelInformer.Informer().GetIndexer(), imcGK, impl.EnqueueKey))
+
// Setup the watch on the config map of dispatcher config
configStore := config.NewEventDispatcherConfigStore(logging.FromContext(ctx))
configStore.WatchConfigs(cmw)
diff --git a/pkg/reconciler/inmemorychannel/controller/controller_test.go b/pkg/reconciler/inmemorychannel/controller/controller_test.go
index f6c60bda7fa..4a6f1f3d2f0 100644
--- a/pkg/reconciler/inmemorychannel/controller/controller_test.go
+++ b/pkg/reconciler/inmemorychannel/controller/controller_test.go
@@ -31,6 +31,7 @@ import (
. "knative.dev/pkg/reconciler/testing"
// Fake injection informers
+ _ "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/fake"
_ "knative.dev/eventing/pkg/client/injection/informers/messaging/v1/inmemorychannel/fake"
"knative.dev/eventing/pkg/reconciler/inmemorychannel/controller/config"
diff --git a/pkg/reconciler/inmemorychannel/controller/inmemorychannel.go b/pkg/reconciler/inmemorychannel/controller/inmemorychannel.go
index 9795dd31fff..3d818463dd6 100644
--- a/pkg/reconciler/inmemorychannel/controller/inmemorychannel.go
+++ b/pkg/reconciler/inmemorychannel/controller/inmemorychannel.go
@@ -20,6 +20,11 @@ import (
"context"
"errors"
"fmt"
+ "strings"
+
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
"k8s.io/client-go/kubernetes"
"k8s.io/utils/pointer"
@@ -93,6 +98,8 @@ type Reconciler struct {
eventDispatcherConfigStore *config.EventDispatcherConfigStore
uriResolver *resolver.URIResolver
+
+ eventPolicyLister v1alpha1.EventPolicyLister
}
// Check that our Reconciler implements Interface
@@ -231,6 +238,44 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, imc *v1.InMemoryChannel)
imc.GetConditionSet().Manage(imc.GetStatus()).MarkTrue(v1.InMemoryChannelConditionAddressable)
+ imc.Status.Policies = nil
+	applyingEventPolicies, err := auth.GetEventPoliciesForResource(r.eventPolicyLister, messagingv1.SchemeGroupVersion.WithKind("InMemoryChannel"), imc.ObjectMeta)
+	if err != nil {
+		logging.FromContext(ctx).Errorw("Unable to get applying event policies for InMemoryChannel", zap.Error(err))
+		imc.Status.MarkEventPoliciesFailed("EventPoliciesGetFailed", "Failed to get applying event policies")
+	}
+
+	if len(applyingEventPolicies) > 0 {
+		unreadyEventPolicies := []string{}
+		for _, policy := range applyingEventPolicies {
+ if !policy.Status.IsReady() {
+ unreadyEventPolicies = append(unreadyEventPolicies, policy.Name)
+ } else {
+ // only add Ready policies to the list
+ imc.Status.Policies = append(imc.Status.Policies, eventingduck.AppliedEventPolicyRef{
+ Name: policy.Name,
+ APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
+ })
+ }
+ }
+
+ if len(unreadyEventPolicies) == 0 {
+ imc.Status.MarkEventPoliciesTrue()
+ } else {
+ imc.Status.MarkEventPoliciesFailed("EventPoliciesNotReady", "event policies %s are not ready", strings.Join(unreadyEventPolicies, ", "))
+ }
+
+ } else {
+ // we have no applying event policy. So we set the EP condition to True
+ if featureFlags.IsOIDCAuthentication() {
+ // in case of OIDC auth, we also set the message with the default authorization mode
+ imc.Status.MarkEventPoliciesTrueWithReason("DefaultAuthorizationMode", "Default authz mode is %q", featureFlags[feature.AuthorizationDefaultMode])
+ } else {
+ // in case OIDC is disabled, we set EP condition to true too, but give some message that authz (EPs) require OIDC
+ imc.Status.MarkEventPoliciesTrueWithReason("OIDCDisabled", "Feature %q must be enabled to support Authorization", feature.OIDCAuthentication)
+ }
+ }
+
// Ok, so now the Dispatcher Deployment & Service have been created, we're golden since the
// dispatcher watches the Channel and where it needs to dispatch events to.
logging.FromContext(ctx).Debugw("Reconciled InMemoryChannel", zap.Any("InMemoryChannel", imc))
diff --git a/pkg/reconciler/inmemorychannel/controller/inmemorychannel_test.go b/pkg/reconciler/inmemorychannel/controller/inmemorychannel_test.go
index ac06a594d52..11c0aca30cd 100644
--- a/pkg/reconciler/inmemorychannel/controller/inmemorychannel_test.go
+++ b/pkg/reconciler/inmemorychannel/controller/inmemorychannel_test.go
@@ -22,6 +22,8 @@ import (
"strconv"
"testing"
+ "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
@@ -75,6 +77,9 @@ const (
maxIdleConnsPerHost = 200
imcGeneration = 7
+
+ readyEventPolicyName = "test-event-policy-ready"
+ unreadyEventPolicyName = "test-event-policy-unready"
)
var (
@@ -183,7 +188,8 @@ func TestAllCases(t *testing.T) {
WithInMemoryChannelEndpointsReady(),
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelAddress(channelServiceAddress),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled()),
}},
}, {
Name: "the status of deployment is unknown",
@@ -206,7 +212,8 @@ func TestAllCases(t *testing.T) {
WithInMemoryChannelEndpointsReady(),
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelAddress(channelServiceAddress),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled()),
}},
}, {
Name: "Service does not exist",
@@ -326,6 +333,7 @@ func TestAllCases(t *testing.T) {
WithInMemoryChannelAddress(channelServiceAddress),
WithDeadLetterSink(imcDest),
WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
}},
}, {
@@ -368,6 +376,7 @@ func TestAllCases(t *testing.T) {
URL: apis.HTTPS(dlsHost),
CACerts: pointer.String(string(eventingtlstesting.CA)),
}),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
}},
}, {
@@ -393,6 +402,7 @@ func TestAllCases(t *testing.T) {
WithInMemoryChannelAddress(channelServiceAddress),
WithDeadLetterSink(imcDest),
WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
}},
}, {
@@ -443,6 +453,7 @@ func TestAllCases(t *testing.T) {
WithInMemoryChannelAddress(channelServiceAddress),
WithDeadLetterSink(imcDest),
WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
}},
}, {
@@ -473,6 +484,7 @@ func TestAllCases(t *testing.T) {
WithInMemoryChannelAddress(channelServiceAddress),
WithDeadLetterSink(imcDest),
WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
}},
}, {
@@ -541,6 +553,7 @@ func TestAllCases(t *testing.T) {
}),
WithDeadLetterSink(imcDest),
WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
}},
Ctx: feature.ToContext(context.Background(), feature.Flags{
@@ -588,6 +601,7 @@ func TestAllCases(t *testing.T) {
}),
WithDeadLetterSink(imcDest),
WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
}},
Ctx: feature.ToContext(context.Background(), feature.Flags{
@@ -617,18 +631,143 @@ func TestAllCases(t *testing.T) {
WithInMemoryChannelServiceReady(),
WithInMemoryChannelEndpointsReady(),
WithInMemoryChannelChannelServiceReady(),
- WithInMemoryChannelAddress(channelServiceAddress),
WithDeadLetterSink(imcDest),
WithInMemoryChannelStatusDLS(dlsStatus),
WithInMemoryChannelAddress(duckv1.Addressable{
URL: channelServiceAddress.URL,
Audience: &channelAudience,
}),
+ WithInMemoryChannelEventPoliciesReadyBecauseNoPolicyAndOIDCEnabled(),
),
}},
Ctx: feature.ToContext(context.Background(), feature.Flags{
- feature.OIDCAuthentication: feature.Enabled,
+ feature.OIDCAuthentication: feature.Enabled,
+ feature.AuthorizationDefaultMode: feature.AuthorizationAllowSameNamespace,
}),
+ }, {
+ Name: "Should provision applying EventPolicies",
+ Key: imcKey,
+ Objects: []runtime.Object{
+ makeDLSServiceAsUnstructured(),
+ makeReadyDeployment(),
+ makeService(),
+ makeReadyEndpoints(),
+ NewInMemoryChannel(imcName, testNS,
+ WithDeadLetterSink(imcDest),
+ WithInMemoryChannelGeneration(imcGeneration),
+ ),
+ makeChannelService(NewInMemoryChannel(imcName, testNS)),
+ NewEventPolicy(readyEventPolicyName, testNS,
+ WithReadyEventPolicyCondition,
+ WithEventPolicyToRef(v1alpha1.EventPolicyToReference{
+ APIVersion: v1.SchemeGroupVersion.String(),
+ Kind: "InMemoryChannel",
+ Name: imcName,
+ }),
+ ),
+ },
+ WantErr: false,
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewInMemoryChannel(imcName, testNS,
+ WithInitInMemoryChannelConditions,
+ WithInMemoryChannelDeploymentReady(),
+ WithInMemoryChannelGeneration(imcGeneration),
+ WithInMemoryChannelStatusObservedGeneration(imcGeneration),
+ WithInMemoryChannelServiceReady(),
+ WithInMemoryChannelEndpointsReady(),
+ WithInMemoryChannelChannelServiceReady(),
+ WithInMemoryChannelAddress(channelServiceAddress),
+ WithDeadLetterSink(imcDest),
+ WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesReady(),
+ WithInMemoryChannelEventPoliciesListed(readyEventPolicyName),
+ ),
+ }},
+ }, {
+ Name: "Should mark NotReady on unready EventPolicy",
+ Key: imcKey,
+ Objects: []runtime.Object{
+ makeDLSServiceAsUnstructured(),
+ makeReadyDeployment(),
+ makeService(),
+ makeReadyEndpoints(),
+ NewInMemoryChannel(imcName, testNS,
+ WithDeadLetterSink(imcDest),
+ WithInMemoryChannelGeneration(imcGeneration),
+ ),
+ makeChannelService(NewInMemoryChannel(imcName, testNS)),
+ NewEventPolicy(unreadyEventPolicyName, testNS,
+ WithUnreadyEventPolicyCondition,
+ WithEventPolicyToRef(v1alpha1.EventPolicyToReference{
+ APIVersion: v1.SchemeGroupVersion.String(),
+ Kind: "InMemoryChannel",
+ Name: imcName,
+ }),
+ ),
+ },
+ WantErr: false,
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewInMemoryChannel(imcName, testNS,
+ WithInitInMemoryChannelConditions,
+ WithInMemoryChannelDeploymentReady(),
+ WithInMemoryChannelGeneration(imcGeneration),
+ WithInMemoryChannelStatusObservedGeneration(imcGeneration),
+ WithInMemoryChannelServiceReady(),
+ WithInMemoryChannelEndpointsReady(),
+ WithInMemoryChannelChannelServiceReady(),
+ WithInMemoryChannelAddress(channelServiceAddress),
+ WithDeadLetterSink(imcDest),
+ WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesNotReady("EventPoliciesNotReady", fmt.Sprintf("event policies %s are not ready", unreadyEventPolicyName)),
+ ),
+ }},
+ }, {
+ Name: "Should list only Ready EventPolicy",
+ Key: imcKey,
+ Objects: []runtime.Object{
+ makeDLSServiceAsUnstructured(),
+ makeReadyDeployment(),
+ makeService(),
+ makeReadyEndpoints(),
+ NewInMemoryChannel(imcName, testNS,
+ WithDeadLetterSink(imcDest),
+ WithInMemoryChannelGeneration(imcGeneration),
+ ),
+ makeChannelService(NewInMemoryChannel(imcName, testNS)),
+ NewEventPolicy(readyEventPolicyName, testNS,
+ WithReadyEventPolicyCondition,
+ WithEventPolicyToRef(v1alpha1.EventPolicyToReference{
+ APIVersion: v1.SchemeGroupVersion.String(),
+ Kind: "InMemoryChannel",
+ Name: imcName,
+ }),
+ ),
+ NewEventPolicy(unreadyEventPolicyName, testNS,
+ WithUnreadyEventPolicyCondition,
+ WithEventPolicyToRef(v1alpha1.EventPolicyToReference{
+ APIVersion: v1.SchemeGroupVersion.String(),
+ Kind: "InMemoryChannel",
+ Name: imcName,
+ }),
+ ),
+ },
+ WantErr: false,
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewInMemoryChannel(imcName, testNS,
+ WithInitInMemoryChannelConditions,
+ WithInMemoryChannelDeploymentReady(),
+ WithInMemoryChannelGeneration(imcGeneration),
+ WithInMemoryChannelStatusObservedGeneration(imcGeneration),
+ WithInMemoryChannelServiceReady(),
+ WithInMemoryChannelEndpointsReady(),
+ WithInMemoryChannelChannelServiceReady(),
+ WithInMemoryChannelAddress(channelServiceAddress),
+ WithDeadLetterSink(imcDest),
+ WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesNotReady("EventPoliciesNotReady", fmt.Sprintf("event policies %s are not ready", unreadyEventPolicyName)),
+ WithInMemoryChannelEventPoliciesListed(readyEventPolicyName),
+ ),
+ }},
}}
logger := logtesting.TestLogger(t)
@@ -645,13 +784,14 @@ func TestAllCases(t *testing.T) {
}
r := &Reconciler{
- kubeClientSet: fakekubeclient.Get(ctx),
- systemNamespace: testNS,
- deploymentLister: listers.GetDeploymentLister(),
- serviceLister: listers.GetServiceLister(),
- endpointsLister: listers.GetEndpointsLister(),
- secretLister: listers.GetSecretLister(),
- uriResolver: resolver.NewURIResolverFromTracker(ctx, tracker.New(func(types.NamespacedName) {}, 0)),
+ kubeClientSet: fakekubeclient.Get(ctx),
+ systemNamespace: testNS,
+ deploymentLister: listers.GetDeploymentLister(),
+ serviceLister: listers.GetServiceLister(),
+ endpointsLister: listers.GetEndpointsLister(),
+ secretLister: listers.GetSecretLister(),
+ eventPolicyLister: listers.GetEventPolicyLister(),
+ uriResolver: resolver.NewURIResolverFromTracker(ctx, tracker.New(func(types.NamespacedName) {}, 0)),
}
return inmemorychannel.NewReconciler(ctx, logger,
fakeeventingclient.Get(ctx), listers.GetInMemoryChannelLister(),
@@ -703,6 +843,7 @@ func TestInNamespace(t *testing.T) {
WithInMemoryChannelAddress(channelServiceAddress),
WithDeadLetterSink(imcDest),
WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
}},
WantEvents: []string{
@@ -742,6 +883,7 @@ func TestInNamespace(t *testing.T) {
WithInMemoryChannelAddress(channelServiceAddress),
WithDeadLetterSink(imcDest),
WithInMemoryChannelStatusDLS(dlsStatus),
+ WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled(),
),
}},
},
@@ -763,6 +905,7 @@ func TestInNamespace(t *testing.T) {
serviceAccountLister: listers.GetServiceAccountLister(),
roleBindingLister: listers.GetRoleBindingLister(),
secretLister: listers.GetSecretLister(),
+ eventPolicyLister: listers.GetEventPolicyLister(),
eventDispatcherConfigStore: eventDispatcherConfigStore,
uriResolver: resolver.NewURIResolverFromTracker(ctx, tracker.New(func(types.NamespacedName) {}, 0)),
}
diff --git a/pkg/reconciler/inmemorychannel/dispatcher/inmemorychannel_test.go b/pkg/reconciler/inmemorychannel/dispatcher/inmemorychannel_test.go
index 5e5d8862291..b50a27c351d 100644
--- a/pkg/reconciler/inmemorychannel/dispatcher/inmemorychannel_test.go
+++ b/pkg/reconciler/inmemorychannel/dispatcher/inmemorychannel_test.go
@@ -268,7 +268,8 @@ func TestAllCases(t *testing.T) {
},
}),
WithInMemoryChannelAddress(channelServiceAddress),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
},
WantEvents: []string{
Eventf(corev1.EventTypeWarning, "InternalError", "failed to parse Spec.BackoffDelay: expected 'P' period mark at the start: garbage"),
@@ -346,7 +347,8 @@ func TestReconciler_ReconcileKind(t *testing.T) {
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelSubscribers(subscribers),
WithInMemoryChannelAddress(channelServiceAddress),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
wantSubs: []fanout.Subscription{{
Subscriber: duckv1.Addressable{
URL: apis.HTTP("call1"),
@@ -375,7 +377,8 @@ func TestReconciler_ReconcileKind(t *testing.T) {
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelSubscribers(subscribers),
WithInMemoryChannelAddress(channelServiceAddress),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
subs: []fanout.Subscription{*subscription1},
wantSubs: []fanout.Subscription{{
Namespace: testNS,
@@ -403,7 +406,8 @@ func TestReconciler_ReconcileKind(t *testing.T) {
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelSubscribers(subscribers),
WithInMemoryChannelAddress(channelServiceAddress),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
subs: []fanout.Subscription{*subscription1, *subscription2},
wantSubs: []fanout.Subscription{{
Namespace: testNS,
@@ -431,7 +435,8 @@ func TestReconciler_ReconcileKind(t *testing.T) {
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelSubscribers([]eventingduckv1.SubscriberSpec{subscriber1}),
WithInMemoryChannelAddress(channelServiceAddress),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
subs: []fanout.Subscription{*subscription1, *subscription2},
wantSubs: []fanout.Subscription{
{
@@ -453,7 +458,8 @@ func TestReconciler_ReconcileKind(t *testing.T) {
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelSubscribers([]eventingduckv1.SubscriberSpec{subscriber1, subscriber3}),
WithInMemoryChannelAddress(channelServiceAddress),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
subs: []fanout.Subscription{*subscription1, *subscription2},
wantSubs: []fanout.Subscription{
{
@@ -482,7 +488,8 @@ func TestReconciler_ReconcileKind(t *testing.T) {
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelSubscribers([]eventingduckv1.SubscriberSpec{subscriber1WithLinearRetry}),
WithInMemoryChannelAddress(channelServiceAddress),
- WithInMemoryChannelDLSUnknown()),
+ WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady()),
subs: []fanout.Subscription{{
Subscriber: duckv1.Addressable{
URL: apis.HTTP("call1"),
@@ -537,7 +544,7 @@ func TestReconciler_ReconcileKind(t *testing.T) {
}
channelHandler := handler.GetChannelHandler(channelServiceAddress.URL.Host)
if channelHandler == nil {
- t.Errorf("Did not get handler for %s", channelServiceAddress.URL.Host)
+ t.Fatalf("Did not get handler for %s", channelServiceAddress.URL.Host)
}
if diff := cmp.Diff(tc.wantSubs, channelHandler.GetSubscriptions(context.TODO()), cmpopts.IgnoreFields(kncloudevents.RetryConfig{}, "Backoff", "CheckRetry"), cmpopts.IgnoreFields(fanout.Subscription{}, "UID")); diff != "" {
t.Error("unexpected subs (+want/-got)", diff)
diff --git a/pkg/reconciler/inmemorychannel/dispatcher/readiness_test.go b/pkg/reconciler/inmemorychannel/dispatcher/readiness_test.go
index e0ec28d8ce1..9bc37da21b2 100644
--- a/pkg/reconciler/inmemorychannel/dispatcher/readiness_test.go
+++ b/pkg/reconciler/inmemorychannel/dispatcher/readiness_test.go
@@ -40,6 +40,7 @@ func TestReadinessChecker(t *testing.T) {
WithInMemoryChannelChannelServiceReady(),
WithInMemoryChannelAddress(duckv1.Addressable{URL: apis.HTTP("fake-address")}),
WithInMemoryChannelDLSUnknown(),
+ WithInMemoryChannelEventPoliciesReady(),
),
})
diff --git a/pkg/reconciler/testing/v1/eventpolicy.go b/pkg/reconciler/testing/v1/eventpolicy.go
new file mode 100644
index 00000000000..a54b07a03e0
--- /dev/null
+++ b/pkg/reconciler/testing/v1/eventpolicy.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "context"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ "knative.dev/pkg/apis"
+)
+
+// EventPolicyOption enables further configuration of an EventPolicy.
+type EventPolicyOption func(*v1alpha1.EventPolicy)
+
+// NewEventPolicy creates a EventPolicy with EventPolicyOptions.
+func NewEventPolicy(name, namespace string, o ...EventPolicyOption) *v1alpha1.EventPolicy {
+ ep := &v1alpha1.EventPolicy{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ }
+ for _, opt := range o {
+ opt(ep)
+ }
+ ep.SetDefaults(context.Background())
+
+ return ep
+}
+
+func WithInitEventPolicyConditions(et *v1alpha1.EventPolicy) {
+ et.Status.InitializeConditions()
+}
+
+func WithReadyEventPolicyCondition(ep *v1alpha1.EventPolicy) {
+ ep.Status.Conditions = []apis.Condition{
+ {
+ Type: v1alpha1.EventPolicyConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ }
+}
+
+func WithUnreadyEventPolicyCondition(ep *v1alpha1.EventPolicy) {
+ ep.Status.Conditions = []apis.Condition{
+ {
+ Type: v1alpha1.EventPolicyConditionReady,
+ Status: corev1.ConditionFalse,
+ },
+ }
+}
+
+func WithEventPolicyTo(tos ...v1alpha1.EventPolicySpecTo) EventPolicyOption {
+ return func(ep *v1alpha1.EventPolicy) {
+ ep.Spec.To = append(ep.Spec.To, tos...)
+ }
+}
+
+func WithEventPolicyToRef(ref v1alpha1.EventPolicyToReference) EventPolicyOption {
+ return func(ep *v1alpha1.EventPolicy) {
+ ep.Spec.To = append(ep.Spec.To, v1alpha1.EventPolicySpecTo{
+ Ref: &ref,
+ })
+ }
+}
+
+func WithEventPolicyFrom(froms ...v1alpha1.EventPolicySpecFrom) EventPolicyOption {
+ return func(ep *v1alpha1.EventPolicy) {
+ ep.Spec.From = append(ep.Spec.From, froms...)
+ }
+}
+
+func WithEventPolicyLabels(labels map[string]string) EventPolicyOption {
+ return func(ep *v1alpha1.EventPolicy) {
+ ep.ObjectMeta.Labels = labels
+ }
+}
+
+func WithEventPolicyOwnerReference(ownerRef metav1.OwnerReference) EventPolicyOption {
+ return func(ep *v1alpha1.EventPolicy) {
+ ep.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
+ ownerRef,
+ }
+ }
+}
diff --git a/pkg/reconciler/testing/v1/inmemorychannel.go b/pkg/reconciler/testing/v1/inmemorychannel.go
index bd6398e67bf..4405cfaf533 100644
--- a/pkg/reconciler/testing/v1/inmemorychannel.go
+++ b/pkg/reconciler/testing/v1/inmemorychannel.go
@@ -18,6 +18,9 @@ import (
"fmt"
"time"
+ "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ "knative.dev/eventing/pkg/apis/feature"
+
"k8s.io/apimachinery/pkg/types"
appsv1 "k8s.io/api/apps/v1"
@@ -25,6 +28,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
duckv1 "knative.dev/pkg/apis/duck/v1"
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
eventingv1 "knative.dev/eventing/pkg/apis/duck/v1"
"knative.dev/eventing/pkg/apis/eventing"
"knative.dev/eventing/pkg/apis/messaging"
@@ -138,6 +142,41 @@ func WithInMemoryChannelEndpointsReady() InMemoryChannelOption {
}
}
+func WithInMemoryChannelEventPoliciesReady() InMemoryChannelOption {
+ return func(imc *v1.InMemoryChannel) {
+ imc.Status.MarkEventPoliciesTrue()
+ }
+}
+
+func WithInMemoryChannelEventPoliciesNotReady(reason, message string) InMemoryChannelOption {
+ return func(imc *v1.InMemoryChannel) {
+ imc.Status.MarkEventPoliciesFailed(reason, message)
+ }
+}
+
+func WithInMemoryChannelEventPoliciesListed(policyNames ...string) InMemoryChannelOption {
+ return func(imc *v1.InMemoryChannel) {
+ for _, names := range policyNames {
+ imc.Status.Policies = append(imc.Status.Policies, eventingduckv1.AppliedEventPolicyRef{
+ APIVersion: v1alpha1.SchemeGroupVersion.String(),
+ Name: names,
+ })
+ }
+ }
+}
+
+func WithInMemoryChannelEventPoliciesReadyBecauseOIDCDisabled() InMemoryChannelOption {
+ return func(imc *v1.InMemoryChannel) {
+ imc.Status.MarkEventPoliciesTrueWithReason("OIDCDisabled", "Feature %q must be enabled to support Authorization", feature.OIDCAuthentication)
+ }
+}
+
+func WithInMemoryChannelEventPoliciesReadyBecauseNoPolicyAndOIDCEnabled() InMemoryChannelOption {
+ return func(imc *v1.InMemoryChannel) {
+ imc.Status.MarkEventPoliciesTrueWithReason("DefaultAuthorizationMode", "Default authz mode is %q", feature.AuthorizationAllowSameNamespace)
+ }
+}
+
func WithInMemoryChannelAddress(a duckv1.Addressable) InMemoryChannelOption {
return func(imc *v1.InMemoryChannel) {
imc.Status.SetAddress(&a)
diff --git a/pkg/reconciler/testing/v1/listers.go b/pkg/reconciler/testing/v1/listers.go
index 788b2fab2cc..0c7c3546d2c 100644
--- a/pkg/reconciler/testing/v1/listers.go
+++ b/pkg/reconciler/testing/v1/listers.go
@@ -31,12 +31,14 @@ import (
"k8s.io/client-go/tools/cache"
eventingduck "knative.dev/eventing/pkg/apis/duck/v1"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
eventingv1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
flowsv1 "knative.dev/eventing/pkg/apis/flows/v1"
messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
fakeeventingclientset "knative.dev/eventing/pkg/client/clientset/versioned/fake"
eventinglisters "knative.dev/eventing/pkg/client/listers/eventing/v1"
+ eventingv1alpha1listers "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
eventingv1beta2listers "knative.dev/eventing/pkg/client/listers/eventing/v1beta2"
flowslisters "knative.dev/eventing/pkg/client/listers/flows/v1"
messaginglisters "knative.dev/eventing/pkg/client/listers/messaging/v1"
@@ -112,6 +114,10 @@ func (l *Listers) GetEventTypeLister() eventingv1beta2listers.EventTypeLister {
return eventingv1beta2listers.NewEventTypeLister(l.indexerFor(&eventingv1beta2.EventType{}))
}
+func (l *Listers) GetEventPolicyLister() eventingv1alpha1listers.EventPolicyLister {
+ return eventingv1alpha1listers.NewEventPolicyLister(l.indexerFor(&eventingv1alpha1.EventPolicy{}))
+}
+
// GetPingSourceLister returns a PingSourceLister backed by this Listers'
// fake indexer.
func (l *Listers) GetPingSourceLister() sourcelisters.PingSourceLister {
	return sourcelisters.NewPingSourceLister(l.indexerFor(&sourcesv1.PingSource{}))
}
From f84a98c609012c98534b685470d8d8fc1fc18b42 Mon Sep 17 00:00:00 2001
From: Pierangelo Di Pilato
Date: Thu, 20 Jun 2024 19:37:02 +0200
Subject: [PATCH 20/33] Remove unused performance tests (#7707)
* Remove unused performance tests
Signed-off-by: Pierangelo Di Pilato
* Use 3.0.3 go-jose
Signed-off-by: Pierangelo Di Pilato
* Upgrade dependencies
Signed-off-by: Pierangelo Di Pilato
---------
Signed-off-by: Pierangelo Di Pilato
---
go.mod | 22 +-
go.sum | 46 +-
hack/tools.go | 1 -
test/performance/README.md | 123 -
.../broker-imc/100-broker-perf-setup.yaml | 174 -
.../200-broker-imc-continuous-load-setup.yaml | 161 -
.../300-broker-imc-increasing-load-setup.yaml | 118 -
.../benchmarks/broker-imc/cluster.yaml | 21 -
.../benchmarks/broker-imc/dev.config | 70 -
.../benchmarks/broker-imc/prod.config | 64 -
.../channel-imc/100-channel-perf-setup.yaml | 108 -
...200-channel-imc-continuous-load-setup.yaml | 152 -
.../300-channel-imc-constant-load-setup.yaml | 112 -
.../benchmarks/channel-imc/cluster.yaml | 21 -
.../benchmarks/channel-imc/dev.config | 70 -
.../benchmarks/channel-imc/prod.config | 64 -
test/performance/config/config-mako.yaml | 115 -
.../direct/100-direct-perf-setup.yaml | 178 -
test/performance/direct/200-direct-perf.yaml | 112 -
test/performance/infra/OWNERS | 12 -
.../infra/aggregator/aggregator.go | 384 -
test/performance/infra/common/constants.go | 30 -
.../infra/common/event_timestamp.go | 25 -
test/performance/infra/common/executor.go | 42 -
test/performance/infra/common/pace.go | 74 -
test/performance/infra/event_state/client.go | 60 -
.../infra/event_state/event_state.pb.go | 298 -
.../infra/event_state/event_state.proto | 42 -
test/performance/infra/image_helpers.go | 170 -
.../infra/receiver/id_extractor.go | 31 -
test/performance/infra/receiver/receiver.go | 191 -
.../infra/receiver/type_extractor.go | 31 -
.../infra/sender/http_load_generator.go | 231 -
.../infra/sender/http_load_generator_test.go | 123 -
.../infra/sender/load_generator.go | 34 -
.../infra/sender/request_interceptor.go | 36 -
.../infra/sender/request_interceptor_test.go | 50 -
test/performance/infra/sender/sender.go | 217 -
test/performance/latency-and-thpt-plot.plg | 14 -
test/performance/latency-plot.plg | 6 -
test/performance/performance-tests.sh | 67 -
test/performance/sample-dev.config | 75 -
test/performance/sample-prod.config | 68 -
test/performance/thpt-plot.plg | 4 -
test/test_images/performance/OWNERS | 12 -
test/test_images/performance/README.md | 125 -
test/test_images/performance/kodata/HEAD | 1 -
test/test_images/performance/kodata/refs | 1 -
test/test_images/performance/main.go | 42 -
test/test_images/performance/mako.config | 51 -
test/test_images/performance/pod.yaml | 12 -
.../github.com/golang/glog/LICENSE | 191 -
.../google/go-github/v27/github/LICENSE | 27 -
.../google/go-querystring/query/LICENSE | 27 -
.../github.com/google/mako/LICENSE | 204 -
.../github.com/influxdata/tdigest/LICENSE | 202 -
.../github.com/rogpeppe/fastuuid/LICENSE | 26 -
.../github.com/rs/dnscache/LICENSE | 21 -
.../github.com/tsenart/vegeta/v12/lib/LICENSE | 20 -
vendor/cloud.google.com/go/storage/CHANGES.md | 19 +
vendor/cloud.google.com/go/storage/acl.go | 11 -
vendor/cloud.google.com/go/storage/bucket.go | 65 +
vendor/cloud.google.com/go/storage/hmac.go | 1 -
.../go/storage/http_client.go | 37 +-
.../storage/internal/apiv2/storage_client.go | 4 +-
.../internal/apiv2/storagepb/storage.pb.go | 4 +-
.../go/storage/internal/version.go | 2 +-
vendor/cloud.google.com/go/storage/invoke.go | 16 +-
vendor/cloud.google.com/go/storage/option.go | 19 +-
vendor/cloud.google.com/go/storage/reader.go | 12 +
vendor/cloud.google.com/go/storage/storage.go | 10 +-
vendor/github.com/golang/glog/LICENSE | 191 -
vendor/github.com/golang/glog/README.md | 36 -
vendor/github.com/golang/glog/glog.go | 777 -
vendor/github.com/golang/glog/glog_file.go | 413 -
.../github.com/golang/glog/glog_file_linux.go | 39 -
.../github.com/golang/glog/glog_file_other.go | 30 -
.../github.com/golang/glog/glog_file_posix.go | 53 -
vendor/github.com/golang/glog/glog_flags.go | 398 -
.../golang/glog/internal/logsink/logsink.go | 393 -
.../glog/internal/logsink/logsink_fatal.go | 35 -
.../glog/internal/stackdump/stackdump.go | 127 -
.../golang/protobuf/jsonpb/decode.go | 531 -
.../golang/protobuf/jsonpb/encode.go | 560 -
.../github.com/golang/protobuf/jsonpb/json.go | 69 -
.../github.com/google/go-github/v27/AUTHORS | 251 -
.../github.com/google/go-github/v27/LICENSE | 27 -
.../google/go-github/v27/github/activity.go | 69 -
.../go-github/v27/github/activity_events.go | 215 -
.../v27/github/activity_notifications.go | 223 -
.../go-github/v27/github/activity_star.go | 137 -
.../go-github/v27/github/activity_watching.go | 146 -
.../google/go-github/v27/github/admin.go | 101 -
.../google/go-github/v27/github/admin_orgs.go | 43 -
.../go-github/v27/github/admin_stats.go | 171 -
.../go-github/v27/github/admin_users.go | 61 -
.../google/go-github/v27/github/apps.go | 284 -
.../go-github/v27/github/apps_installation.go | 103 -
.../go-github/v27/github/apps_marketplace.go | 184 -
.../go-github/v27/github/authorizations.go | 435 -
.../google/go-github/v27/github/checks.go | 435 -
.../google/go-github/v27/github/doc.go | 188 -
.../google/go-github/v27/github/event.go | 132 -
.../go-github/v27/github/event_types.go | 890 --
.../google/go-github/v27/github/gists.go | 363 -
.../go-github/v27/github/gists_comments.go | 119 -
.../google/go-github/v27/github/git.go | 12 -
.../google/go-github/v27/github/git_blobs.go | 69 -
.../go-github/v27/github/git_commits.go | 200 -
.../google/go-github/v27/github/git_refs.go | 222 -
.../google/go-github/v27/github/git_tags.go | 76 -
.../google/go-github/v27/github/git_trees.go | 99 -
.../go-github/v27/github/github-accessors.go | 12981 ----------------
.../google/go-github/v27/github/github.go | 1047 --
.../google/go-github/v27/github/gitignore.go | 64 -
.../go-github/v27/github/interactions.go | 28 -
.../go-github/v27/github/interactions_orgs.go | 80 -
.../v27/github/interactions_repos.go | 80 -
.../google/go-github/v27/github/issues.go | 347 -
.../go-github/v27/github/issues_assignees.go | 85 -
.../go-github/v27/github/issues_comments.go | 153 -
.../go-github/v27/github/issues_events.go | 175 -
.../go-github/v27/github/issues_labels.go | 261 -
.../go-github/v27/github/issues_milestones.go | 148 -
.../go-github/v27/github/issues_timeline.go | 154 -
.../google/go-github/v27/github/licenses.go | 97 -
.../google/go-github/v27/github/messages.go | 258 -
.../google/go-github/v27/github/migrations.go | 224 -
.../v27/github/migrations_source_import.go | 329 -
.../go-github/v27/github/migrations_user.go | 214 -
.../google/go-github/v27/github/misc.go | 257 -
.../google/go-github/v27/github/orgs.go | 215 -
.../google/go-github/v27/github/orgs_hooks.go | 118 -
.../go-github/v27/github/orgs_members.go | 370 -
.../v27/github/orgs_outside_collaborators.go | 81 -
.../go-github/v27/github/orgs_projects.go | 60 -
.../v27/github/orgs_users_blocking.go | 91 -
.../google/go-github/v27/github/projects.go | 594 -
.../google/go-github/v27/github/pulls.go | 478 -
.../go-github/v27/github/pulls_comments.go | 189 -
.../go-github/v27/github/pulls_reviewers.go | 80 -
.../go-github/v27/github/pulls_reviews.go | 261 -
.../google/go-github/v27/github/reactions.go | 377 -
.../google/go-github/v27/github/repos.go | 1303 --
.../v27/github/repos_collaborators.go | 137 -
.../go-github/v27/github/repos_comments.go | 162 -
.../go-github/v27/github/repos_commits.go | 264 -
.../v27/github/repos_community_health.go | 59 -
.../go-github/v27/github/repos_contents.go | 269 -
.../go-github/v27/github/repos_deployments.go | 229 -
.../go-github/v27/github/repos_forks.go | 96 -
.../go-github/v27/github/repos_hooks.go | 226 -
.../go-github/v27/github/repos_invitations.go | 89 -
.../google/go-github/v27/github/repos_keys.go | 111 -
.../go-github/v27/github/repos_merging.go | 38 -
.../go-github/v27/github/repos_pages.go | 190 -
.../v27/github/repos_prereceive_hooks.go | 110 -
.../go-github/v27/github/repos_projects.go | 69 -
.../go-github/v27/github/repos_releases.go | 374 -
.../go-github/v27/github/repos_stats.go | 226 -
.../go-github/v27/github/repos_statuses.go | 131 -
.../go-github/v27/github/repos_traffic.go | 141 -
.../google/go-github/v27/github/search.go | 258 -
.../google/go-github/v27/github/strings.go | 93 -
.../google/go-github/v27/github/teams.go | 562 -
.../v27/github/teams_discussion_comments.go | 155 -
.../go-github/v27/github/teams_discussions.go | 160 -
.../go-github/v27/github/teams_members.go | 174 -
.../google/go-github/v27/github/timestamp.go | 41 -
.../google/go-github/v27/github/users.go | 278 -
.../v27/github/users_administration.go | 72 -
.../go-github/v27/github/users_blocking.go | 91 -
.../go-github/v27/github/users_emails.go | 72 -
.../go-github/v27/github/users_followers.go | 119 -
.../go-github/v27/github/users_gpg_keys.go | 128 -
.../google/go-github/v27/github/users_keys.go | 109 -
.../go-github/v27/github/with_appengine.go | 20 -
.../go-github/v27/github/without_appengine.go | 19 -
.../github.com/google/go-querystring/LICENSE | 27 -
.../google/go-querystring/query/encode.go | 320 -
vendor/github.com/google/mako/LICENSE | 204 -
.../threshold_analyzer.pb.go | 420 -
.../utest_analyzer.pb.go | 495 -
.../window_deviation.pb.go | 855 -
.../google/mako/go/quickstore/quickstore.go | 301 -
.../mako/internal/go/common/common_deps.go | 17 -
.../quickstore_go_proto/quickstore.pb.go | 359 -
.../quickstore_go_proto/quickstore.pb.go | 439 -
.../mako/spec/proto/mako_go_proto/mako.pb.go | 4052 -----
.../github.com/influxdata/tdigest/.gitignore | 1 -
vendor/github.com/influxdata/tdigest/LICENSE | 202 -
.../github.com/influxdata/tdigest/README.md | 42 -
.../github.com/influxdata/tdigest/centroid.go | 60 -
.../github.com/influxdata/tdigest/tdigest.go | 276 -
vendor/github.com/mailru/easyjson/.gitignore | 6 -
vendor/github.com/mailru/easyjson/.travis.yml | 15 -
vendor/github.com/mailru/easyjson/Makefile | 72 -
vendor/github.com/mailru/easyjson/README.md | 387 -
vendor/github.com/mailru/easyjson/helpers.go | 114 -
vendor/github.com/mailru/easyjson/raw.go | 45 -
.../mailru/easyjson/unknown_fields.go | 32 -
vendor/github.com/rogpeppe/fastuuid/LICENSE | 26 -
vendor/github.com/rogpeppe/fastuuid/README.md | 95 -
vendor/github.com/rogpeppe/fastuuid/uuid.go | 146 -
vendor/github.com/rs/dnscache/.travis.yml | 13 -
vendor/github.com/rs/dnscache/LICENSE | 21 -
vendor/github.com/rs/dnscache/README.md | 78 -
vendor/github.com/rs/dnscache/dnscache.go | 275 -
vendor/github.com/tsenart/vegeta/v12/LICENSE | 20 -
.../tsenart/vegeta/v12/lib/attack.go | 563 -
.../tsenart/vegeta/v12/lib/attack_fuzz.go | 154 -
.../tsenart/vegeta/v12/lib/histogram.go | 86 -
.../tsenart/vegeta/v12/lib/metrics.go | 201 -
.../tsenart/vegeta/v12/lib/pacer.go | 309 -
.../tsenart/vegeta/v12/lib/reporters.go | 279 -
.../tsenart/vegeta/v12/lib/results.go | 302 -
.../vegeta/v12/lib/results_easyjson.go | 220 -
.../tsenart/vegeta/v12/lib/results_fuzz.go | 64 -
.../tsenart/vegeta/v12/lib/target.schema.json | 39 -
.../tsenart/vegeta/v12/lib/targets.go | 380 -
.../vegeta/v12/lib/targets_easyjson.go | 167 -
.../tsenart/vegeta/v12/lib/targets_fuzz.go | 69 -
.../tsenart/vegeta/v12/lib/util_fuzz.go | 121 -
vendor/golang.org/x/crypto/cast5/cast5.go | 536 -
.../x/crypto/openpgp/armor/armor.go | 232 -
.../x/crypto/openpgp/armor/encode.go | 161 -
.../x/crypto/openpgp/canonical_text.go | 59 -
.../x/crypto/openpgp/elgamal/elgamal.go | 130 -
.../x/crypto/openpgp/errors/errors.go | 78 -
vendor/golang.org/x/crypto/openpgp/keys.go | 693 -
.../x/crypto/openpgp/packet/compressed.go | 123 -
.../x/crypto/openpgp/packet/config.go | 91 -
.../x/crypto/openpgp/packet/encrypted_key.go | 208 -
.../x/crypto/openpgp/packet/literal.go | 89 -
.../x/crypto/openpgp/packet/ocfb.go | 143 -
.../openpgp/packet/one_pass_signature.go | 73 -
.../x/crypto/openpgp/packet/opaque.go | 161 -
.../x/crypto/openpgp/packet/packet.go | 590 -
.../x/crypto/openpgp/packet/private_key.go | 384 -
.../x/crypto/openpgp/packet/public_key.go | 753 -
.../x/crypto/openpgp/packet/public_key_v3.go | 279 -
.../x/crypto/openpgp/packet/reader.go | 76 -
.../x/crypto/openpgp/packet/signature.go | 731 -
.../x/crypto/openpgp/packet/signature_v3.go | 146 -
.../openpgp/packet/symmetric_key_encrypted.go | 155 -
.../openpgp/packet/symmetrically_encrypted.go | 290 -
.../x/crypto/openpgp/packet/userattribute.go | 90 -
.../x/crypto/openpgp/packet/userid.go | 159 -
vendor/golang.org/x/crypto/openpgp/read.go | 448 -
vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 -
vendor/golang.org/x/crypto/openpgp/write.go | 418 -
.../x/sync/singleflight/singleflight.go | 214 -
vendor/knative.dev/pkg/test/ghutil/client.go | 159 -
vendor/knative.dev/pkg/test/ghutil/issue.go | 254 -
.../pkg/test/ghutil/pullrequest.go | 210 -
vendor/knative.dev/pkg/test/ghutil/repo.go | 49 -
vendor/knative.dev/pkg/test/mako/README.md | 8 -
.../pkg/test/mako/alerter/alerter.go | 85 -
.../pkg/test/mako/alerter/github/issue.go | 287 -
.../pkg/test/mako/alerter/slack/message.go | 129 -
vendor/knative.dev/pkg/test/mako/analyzer.go | 35 -
.../pkg/test/mako/config/benchmark.go | 75 -
.../pkg/test/mako/config/configmap.go | 93 -
.../pkg/test/mako/config/environment.go | 66 -
.../knative.dev/pkg/test/mako/config/slack.go | 56 -
vendor/knative.dev/pkg/test/mako/sidecar.go | 188 -
.../pkg/test/mako/stub-sidecar/main.go | 169 -
.../test/mako/stub-sidecar/read_results.sh | 74 -
vendor/knative.dev/pkg/test/mako/time.go | 26 -
vendor/knative.dev/pkg/test/slackutil/http.go | 53 -
.../pkg/test/slackutil/message_read.go | 95 -
.../pkg/test/slackutil/message_write.go | 77 -
.../knative.dev/pkg/third_party/mako/LICENSE | 204 -
.../quickstore_go_proto/quickstore.pb.go | 374 -
vendor/modules.txt | 62 +-
275 files changed, 166 insertions(+), 63407 deletions(-)
delete mode 100644 test/performance/README.md
delete mode 100644 test/performance/benchmarks/broker-imc/100-broker-perf-setup.yaml
delete mode 100644 test/performance/benchmarks/broker-imc/200-broker-imc-continuous-load-setup.yaml
delete mode 100644 test/performance/benchmarks/broker-imc/300-broker-imc-increasing-load-setup.yaml
delete mode 100644 test/performance/benchmarks/broker-imc/cluster.yaml
delete mode 100644 test/performance/benchmarks/broker-imc/dev.config
delete mode 100644 test/performance/benchmarks/broker-imc/prod.config
delete mode 100644 test/performance/benchmarks/channel-imc/100-channel-perf-setup.yaml
delete mode 100644 test/performance/benchmarks/channel-imc/200-channel-imc-continuous-load-setup.yaml
delete mode 100644 test/performance/benchmarks/channel-imc/300-channel-imc-constant-load-setup.yaml
delete mode 100644 test/performance/benchmarks/channel-imc/cluster.yaml
delete mode 100644 test/performance/benchmarks/channel-imc/dev.config
delete mode 100644 test/performance/benchmarks/channel-imc/prod.config
delete mode 100644 test/performance/config/config-mako.yaml
delete mode 100644 test/performance/direct/100-direct-perf-setup.yaml
delete mode 100644 test/performance/direct/200-direct-perf.yaml
delete mode 100644 test/performance/infra/OWNERS
delete mode 100644 test/performance/infra/aggregator/aggregator.go
delete mode 100644 test/performance/infra/common/constants.go
delete mode 100644 test/performance/infra/common/event_timestamp.go
delete mode 100644 test/performance/infra/common/executor.go
delete mode 100644 test/performance/infra/common/pace.go
delete mode 100644 test/performance/infra/event_state/client.go
delete mode 100644 test/performance/infra/event_state/event_state.pb.go
delete mode 100644 test/performance/infra/event_state/event_state.proto
delete mode 100644 test/performance/infra/image_helpers.go
delete mode 100644 test/performance/infra/receiver/id_extractor.go
delete mode 100644 test/performance/infra/receiver/receiver.go
delete mode 100644 test/performance/infra/receiver/type_extractor.go
delete mode 100644 test/performance/infra/sender/http_load_generator.go
delete mode 100644 test/performance/infra/sender/http_load_generator_test.go
delete mode 100644 test/performance/infra/sender/load_generator.go
delete mode 100644 test/performance/infra/sender/request_interceptor.go
delete mode 100644 test/performance/infra/sender/request_interceptor_test.go
delete mode 100644 test/performance/infra/sender/sender.go
delete mode 100644 test/performance/latency-and-thpt-plot.plg
delete mode 100644 test/performance/latency-plot.plg
delete mode 100755 test/performance/performance-tests.sh
delete mode 100644 test/performance/sample-dev.config
delete mode 100644 test/performance/sample-prod.config
delete mode 100644 test/performance/thpt-plot.plg
delete mode 100644 test/test_images/performance/OWNERS
delete mode 100644 test/test_images/performance/README.md
delete mode 120000 test/test_images/performance/kodata/HEAD
delete mode 120000 test/test_images/performance/kodata/refs
delete mode 100644 test/test_images/performance/main.go
delete mode 100644 test/test_images/performance/mako.config
delete mode 100644 test/test_images/performance/pod.yaml
delete mode 100644 third_party/VENDOR-LICENSE/github.com/golang/glog/LICENSE
delete mode 100644 third_party/VENDOR-LICENSE/github.com/google/go-github/v27/github/LICENSE
delete mode 100644 third_party/VENDOR-LICENSE/github.com/google/go-querystring/query/LICENSE
delete mode 100644 third_party/VENDOR-LICENSE/github.com/google/mako/LICENSE
delete mode 100644 third_party/VENDOR-LICENSE/github.com/influxdata/tdigest/LICENSE
delete mode 100644 third_party/VENDOR-LICENSE/github.com/rogpeppe/fastuuid/LICENSE
delete mode 100644 third_party/VENDOR-LICENSE/github.com/rs/dnscache/LICENSE
delete mode 100644 third_party/VENDOR-LICENSE/github.com/tsenart/vegeta/v12/lib/LICENSE
delete mode 100644 vendor/github.com/golang/glog/LICENSE
delete mode 100644 vendor/github.com/golang/glog/README.md
delete mode 100644 vendor/github.com/golang/glog/glog.go
delete mode 100644 vendor/github.com/golang/glog/glog_file.go
delete mode 100644 vendor/github.com/golang/glog/glog_file_linux.go
delete mode 100644 vendor/github.com/golang/glog/glog_file_other.go
delete mode 100644 vendor/github.com/golang/glog/glog_file_posix.go
delete mode 100644 vendor/github.com/golang/glog/glog_flags.go
delete mode 100644 vendor/github.com/golang/glog/internal/logsink/logsink.go
delete mode 100644 vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
delete mode 100644 vendor/github.com/golang/glog/internal/stackdump/stackdump.go
delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go
delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go
delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go
delete mode 100644 vendor/github.com/google/go-github/v27/AUTHORS
delete mode 100644 vendor/github.com/google/go-github/v27/LICENSE
delete mode 100644 vendor/github.com/google/go-github/v27/github/activity.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/activity_events.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/activity_notifications.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/activity_star.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/activity_watching.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/admin.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/admin_orgs.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/admin_stats.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/admin_users.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/apps.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/apps_installation.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/apps_marketplace.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/authorizations.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/checks.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/doc.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/event.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/event_types.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/gists.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/gists_comments.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/git.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/git_blobs.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/git_commits.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/git_refs.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/git_tags.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/git_trees.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/github-accessors.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/github.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/gitignore.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/interactions.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/interactions_orgs.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/interactions_repos.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/issues.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/issues_assignees.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/issues_comments.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/issues_events.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/issues_labels.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/issues_milestones.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/issues_timeline.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/licenses.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/messages.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/migrations.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/migrations_source_import.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/migrations_user.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/misc.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/orgs.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/orgs_hooks.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/orgs_members.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/orgs_outside_collaborators.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/orgs_projects.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/orgs_users_blocking.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/projects.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/pulls.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/pulls_comments.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/pulls_reviewers.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/pulls_reviews.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/reactions.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_collaborators.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_comments.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_commits.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_community_health.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_contents.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_deployments.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_forks.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_hooks.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_invitations.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_keys.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_merging.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_pages.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_prereceive_hooks.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_projects.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_releases.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_stats.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_statuses.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/repos_traffic.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/search.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/strings.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/teams.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/teams_discussion_comments.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/teams_discussions.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/teams_members.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/timestamp.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/users.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/users_administration.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/users_blocking.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/users_emails.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/users_followers.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/users_gpg_keys.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/users_keys.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/with_appengine.go
delete mode 100644 vendor/github.com/google/go-github/v27/github/without_appengine.go
delete mode 100644 vendor/github.com/google/go-querystring/LICENSE
delete mode 100644 vendor/github.com/google/go-querystring/query/encode.go
delete mode 100644 vendor/github.com/google/mako/LICENSE
delete mode 100644 vendor/github.com/google/mako/clients/proto/analyzers/threshold_analyzer_go_proto/threshold_analyzer.pb.go
delete mode 100644 vendor/github.com/google/mako/clients/proto/analyzers/utest_analyzer_go_proto/utest_analyzer.pb.go
delete mode 100644 vendor/github.com/google/mako/clients/proto/analyzers/window_deviation_go_proto/window_deviation.pb.go
delete mode 100644 vendor/github.com/google/mako/go/quickstore/quickstore.go
delete mode 100644 vendor/github.com/google/mako/internal/go/common/common_deps.go
delete mode 100644 vendor/github.com/google/mako/internal/quickstore_microservice/proto/quickstore_go_proto/quickstore.pb.go
delete mode 100644 vendor/github.com/google/mako/proto/quickstore/quickstore_go_proto/quickstore.pb.go
delete mode 100644 vendor/github.com/google/mako/spec/proto/mako_go_proto/mako.pb.go
delete mode 100644 vendor/github.com/influxdata/tdigest/.gitignore
delete mode 100644 vendor/github.com/influxdata/tdigest/LICENSE
delete mode 100644 vendor/github.com/influxdata/tdigest/README.md
delete mode 100644 vendor/github.com/influxdata/tdigest/centroid.go
delete mode 100644 vendor/github.com/influxdata/tdigest/tdigest.go
delete mode 100644 vendor/github.com/mailru/easyjson/.gitignore
delete mode 100644 vendor/github.com/mailru/easyjson/.travis.yml
delete mode 100644 vendor/github.com/mailru/easyjson/Makefile
delete mode 100644 vendor/github.com/mailru/easyjson/README.md
delete mode 100644 vendor/github.com/mailru/easyjson/helpers.go
delete mode 100644 vendor/github.com/mailru/easyjson/raw.go
delete mode 100644 vendor/github.com/mailru/easyjson/unknown_fields.go
delete mode 100644 vendor/github.com/rogpeppe/fastuuid/LICENSE
delete mode 100644 vendor/github.com/rogpeppe/fastuuid/README.md
delete mode 100644 vendor/github.com/rogpeppe/fastuuid/uuid.go
delete mode 100644 vendor/github.com/rs/dnscache/.travis.yml
delete mode 100644 vendor/github.com/rs/dnscache/LICENSE
delete mode 100644 vendor/github.com/rs/dnscache/README.md
delete mode 100644 vendor/github.com/rs/dnscache/dnscache.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/LICENSE
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/attack.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/attack_fuzz.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/histogram.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/metrics.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/pacer.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/reporters.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/results.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/results_easyjson.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/results_fuzz.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/target.schema.json
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/targets.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/targets_easyjson.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/targets_fuzz.go
delete mode 100644 vendor/github.com/tsenart/vegeta/v12/lib/util_fuzz.go
delete mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/read.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
delete mode 100644 vendor/golang.org/x/crypto/openpgp/write.go
delete mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go
delete mode 100644 vendor/knative.dev/pkg/test/ghutil/client.go
delete mode 100644 vendor/knative.dev/pkg/test/ghutil/issue.go
delete mode 100644 vendor/knative.dev/pkg/test/ghutil/pullrequest.go
delete mode 100644 vendor/knative.dev/pkg/test/ghutil/repo.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/README.md
delete mode 100644 vendor/knative.dev/pkg/test/mako/alerter/alerter.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/alerter/github/issue.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/alerter/slack/message.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/analyzer.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/config/benchmark.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/config/configmap.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/config/environment.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/config/slack.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/sidecar.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/stub-sidecar/main.go
delete mode 100644 vendor/knative.dev/pkg/test/mako/stub-sidecar/read_results.sh
delete mode 100644 vendor/knative.dev/pkg/test/mako/time.go
delete mode 100644 vendor/knative.dev/pkg/test/slackutil/http.go
delete mode 100644 vendor/knative.dev/pkg/test/slackutil/message_read.go
delete mode 100644 vendor/knative.dev/pkg/test/slackutil/message_write.go
delete mode 100644 vendor/knative.dev/pkg/third_party/mako/LICENSE
delete mode 100644 vendor/knative.dev/pkg/third_party/mako/proto/quickstore_go_proto/quickstore.pb.go
diff --git a/go.mod b/go.mod
index db0a2a1826c..99fdc1fab8d 100644
--- a/go.mod
+++ b/go.mod
@@ -12,10 +12,8 @@ require (
github.com/coreos/go-oidc/v3 v3.9.0
github.com/eclipse/paho.golang v0.12.0
github.com/go-jose/go-jose/v3 v3.0.3
- github.com/golang/protobuf v1.5.4
github.com/google/go-cmp v0.6.0
github.com/google/gofuzz v1.2.0
- github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.1
github.com/hashicorp/go-cleanhttp v0.5.1
@@ -29,9 +27,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/rickb777/date v1.13.0
github.com/robfig/cron/v3 v3.0.1
- github.com/rogpeppe/fastuuid v1.2.0
github.com/stretchr/testify v1.9.0
- github.com/tsenart/vegeta/v12 v12.11.1
github.com/wavesoftware/go-ensure v1.0.0
go.opencensus.io v0.24.0
go.opentelemetry.io/otel v1.24.0
@@ -41,8 +37,6 @@ require (
go.uber.org/zap v1.27.0
golang.org/x/net v0.26.0
golang.org/x/sync v0.7.0
- google.golang.org/grpc v1.64.0
- google.golang.org/protobuf v1.34.1
k8s.io/api v0.29.2
k8s.io/apiextensions-apiserver v0.29.2
k8s.io/apimachinery v0.29.2
@@ -51,8 +45,8 @@ require (
k8s.io/utils v0.0.0-20240102154912-e7106e64919e
knative.dev/hack v0.0.0-20240607132042-09143140a254
knative.dev/hack/schema v0.0.0-20240607132042-09143140a254
- knative.dev/pkg v0.0.0-20240614135239-339c22b8218c
- knative.dev/reconciler-test v0.0.0-20240611155001-199a5264927d
+ knative.dev/pkg v0.0.0-20240620153518-fe2597ab85a0
+ knative.dev/reconciler-test v0.0.0-20240618170853-5bf0b86114f8
sigs.k8s.io/yaml v1.4.0
)
@@ -62,7 +56,7 @@ require (
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
cloud.google.com/go/iam v1.1.8 // indirect
- cloud.google.com/go/storage v1.41.0 // indirect
+ cloud.google.com/go/storage v1.42.0 // indirect
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
contrib.go.opencensus.io/exporter/zipkin v0.1.2 // indirect
@@ -86,18 +80,15 @@ require (
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gobuffalo/flect v1.0.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/glog v1.2.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/go-github/v27 v27.0.6 // indirect
- github.com/google/go-querystring v1.0.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.4 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/imdario/mergo v0.3.9 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/influxdata/tdigest v0.0.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -111,7 +102,6 @@ require (
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/rickb777/plural v1.2.1 // indirect
- github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
@@ -131,8 +121,10 @@ require (
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/api v0.183.0 // indirect
google.golang.org/genproto v0.0.0-20240528184218-531527333157 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
+ google.golang.org/grpc v1.64.0 // indirect
+ google.golang.org/protobuf v1.34.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 3bfa4c71fdc..c9ad61b752f 100644
--- a/go.sum
+++ b/go.sum
@@ -31,6 +31,8 @@ cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0=
cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE=
+cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU=
+cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -40,8 +42,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0=
-cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80=
+cloud.google.com/go/storage v1.42.0 h1:4QtGpplCVt1wz6g5o1ifXd656P5z+yNgzdw1tVfp0cU=
+cloud.google.com/go/storage v1.42.0/go.mod h1:HjMXRFq65pGKFn6hxj6x3HCyR41uSB72Z0SO/Vn6JFQ=
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h1:LblfooH1lKOpp1hIhukktmSAxFkqMPFk9KR6iZ0MJNI=
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY=
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
@@ -73,8 +75,6 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE=
github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc=
-github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e h1:mWOqoK5jV13ChKf/aF3plwQ96laasTJgZi4f1aSOu+M=
-github.com/bmizerany/perks v0.0.0-20230307044200-03f9df79da1e/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
@@ -108,8 +108,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654 h1:XOPLOMn/zT4jIgxfxSsoXPxkrzz0FaCHwp33x5POJ+Q=
-github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
@@ -221,16 +219,10 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-github/v27 v27.0.6 h1:oiOZuBmGHvrGM1X9uNUAUlLgp5r1UUO/M/KnbHnLRlQ=
-github.com/google/go-github/v27 v27.0.6/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0=
-github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3 h1:/o5e44nTD/QEEiWPGSFT3bSqcq3Qg7q27N9bv4gKh5M=
-github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3/go.mod h1:YzLcVlL+NqWnmUEPuhS1LxDDwGO9WNbVlEXaF4IH35g=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -282,8 +274,6 @@ github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/influxdata/tdigest v0.0.1 h1:XpFptwYmnEKUqmkcDjrzffswZ3nvNeevbUSLPP/ZzIY=
-github.com/influxdata/tdigest v0.0.1/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -404,13 +394,10 @@ github.com/rickb777/plural v1.2.1 h1:UitRAgR70+yHFt26Tmj/F9dU9aV6UfjGXSbO1DcC9/U
github.com/rickb777/plural v1.2.1/go.mod h1:j058+3M5QQFgcZZ2oKIOekcygoZUL8gKW5yRO14BuAw=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
-github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs=
-github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -431,8 +418,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/sqs/goreturns v0.0.0-20181028201513-538ac6014518/go.mod h1:CKI4AZ4XmGV240rTHfO0hfE83S6/a3/Q1siZJ/vXf7A=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/quantile v0.0.0-20220407130108-4246515d968d h1:X4+kt6zM/OVO6gbJdAfJR60MGPsqCzbtXNnjoGqdfAs=
-github.com/streadway/quantile v0.0.0-20220407130108-4246515d968d/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -448,8 +433,6 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
-github.com/tsenart/vegeta/v12 v12.11.1 h1:Rbwe7Zxr7sJ+BDTReemeQalYPvKiSV+O7nwmUs20B3E=
-github.com/tsenart/vegeta/v12 v12.11.1/go.mod h1:swiFmrgpqj2llHURgHYFRFN0tfrIrlnspg01HjwOnSQ=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -505,7 +488,6 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
-golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -516,8 +498,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw=
-golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -683,7 +663,6 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -740,9 +719,6 @@ golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSm
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca h1:PupagGYwj8+I4ubCxcmcBRk3VlUWtTg5huQpZR9flmE=
-gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -801,8 +777,8 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE=
google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e h1:SkdGTrROJl2jRGT/Fxv5QUf9jtdKCQh4KQJXbXVLAi0=
-google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e/go.mod h1:LweJcLbyVij6rCex8YunD8DYR5VDonap/jYl3ZRxcIU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -896,12 +872,10 @@ knative.dev/hack v0.0.0-20240607132042-09143140a254 h1:1YFnu3U6dWZg0oxm6GU8kEdA9
knative.dev/hack v0.0.0-20240607132042-09143140a254/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/hack/schema v0.0.0-20240607132042-09143140a254 h1:b9hFHGtxx0Kpm4EEjSD72lL0jms91To3OEVBTbqfOYI=
knative.dev/hack/schema v0.0.0-20240607132042-09143140a254/go.mod h1:3pWwBLnTZSM9psSgCAvhKOHIPTzqfEMlWRpDu6IYhK0=
-knative.dev/pkg v0.0.0-20240614135239-339c22b8218c h1:OaKrY7L6rzWTvs51JlieJajL40F6CpBbvO1aZspg2EA=
-knative.dev/pkg v0.0.0-20240614135239-339c22b8218c/go.mod h1:l7R8/SteYph0mZDsVgq3fVs4mWp1DaYx9BJJX68U6ik=
-knative.dev/reconciler-test v0.0.0-20240611155001-199a5264927d h1:FBpgtMooLXWfl8QjGNVEosw9QGPhJzkPip+x5jBVrT8=
-knative.dev/reconciler-test v0.0.0-20240611155001-199a5264927d/go.mod h1:iKOTdGVwm+SmVA/blgirYTdYU/Kw3Znj2arDYLlhoXw=
-pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw=
-pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
+knative.dev/pkg v0.0.0-20240620153518-fe2597ab85a0 h1:am9mP1UX4fONtm7WG/ew8bAgvQp4PaIRkRDRZDJPA6E=
+knative.dev/pkg v0.0.0-20240620153518-fe2597ab85a0/go.mod h1:sG+GSK2d4Cco77z/HMeVn5zMigg8fdGKi06/biqUMy0=
+knative.dev/reconciler-test v0.0.0-20240618170853-5bf0b86114f8 h1:A+rsitEiTX3GudM51g7zUMza+Ripj+boncmlJ2jZp50=
+knative.dev/reconciler-test v0.0.0-20240618170853-5bf0b86114f8/go.mod h1:2uUx3U6kdIzgJgMGgrGmdDdcFrFiex/DjuI2gM7Tte8=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/hack/tools.go b/hack/tools.go
index c90b7b97850..622e5c32556 100644
--- a/hack/tools.go
+++ b/hack/tools.go
@@ -26,7 +26,6 @@ import (
// Needed for the storage version too.
_ "knative.dev/pkg/apiextensions/storageversion/cmd/migrate"
- _ "knative.dev/pkg/test/mako/stub-sidecar"
// For chaos testing the leaderelection stuff.
_ "knative.dev/pkg/leaderelection/chaosduck"
diff --git a/test/performance/README.md b/test/performance/README.md
deleted file mode 100644
index 3bf56cb6332..00000000000
--- a/test/performance/README.md
+++ /dev/null
@@ -1,123 +0,0 @@
-# Knative Eventing Performance Tests
-
-## Configuring your cluster to run a benchmark
-
-1. Create a namespace `perf-eventing` if it does not exist. To use a different
- namespace, please replace all the namespaces in all bash commands and yaml
- configuration files with your choice.
-
-1. Install Knative eventing and components used in the performance test, such as
- MT broker, by following the steps in
- https://github.com/knative/eventing/blob/main/DEVELOPMENT.md.
-
-1. Create a ConfigMap called `config-mako` in your chosen namespace containing
- the Mako config file.
-
-```
-kubectl create configmap -n perf-eventing config-mako --from-file=test/performance/benchmarks//dev.config
-```
-
-1. Optionally edit the ConfigMap to set additional keys.
-
- ```
- kubectl edit configmap -n perf-eventing config-mako
- ```
-
-[`NewConfigFromMap`](https://github.com/knative/pkg/blob/main/test/mako/config/configmap.go#L53)
-determines the valid keys in this ConfigMap. Current keys are:
-
-- `environment`: Select a Mako config file in the ConfigMap. E.g.
- `environment: dev` corresponds to `dev.config`.
-- `additionalTags`: Comma-separated list of tags to apply to the Mako run.
-
-### Run benchmarks continuously in CI using Mako
-
-To run a benchmark continuously, and make the result available on
-[Mako](https://mako.dev/project?name=Knative):
-
-1. Use `ko` to apply yaml files in the benchmark directory.
-
-
- ```
- ko apply -f test/performance/benchmarks/broker-imc/200-broker-imc-continuous-load-setup.yaml
- ```
-
-### Run without Mako
-
-To run a benchmark once, and use the result from `mako-stub` for plotting:
-
-1. Install the eventing resources for attacking:
-
- ```
- ko apply -f test/performance/benchmarks/broker-imc/100-broker-perf-setup.yaml
- ```
-
-1. Start the benchmarking job:
-
- ```
- ko apply -f test/performance/benchmarks/broker-imc/300-broker-imc-increasing-load-setup.yaml
- ```
-
-1. Wait until all the pods in namespace `perf-eventing` are completed.
-
-1. Retrieve results from mako-stub using the script in
- [knative/pkg](https://github.com/knative/pkg/blob/main/test/mako/stub-sidecar/read_results.sh)
- where `pod_name` is the name of the aggregator pod:
-
- ```
- bash "$GOPATH/src/knative.dev/eventing/vendor/knative.dev/pkg/test/mako/stub-sidecar/read_results.sh" "$pod_name" perf-eventing ${mako_port:-10001} ${timeout:-120} ${retries:-100} ${retries_interval:-10} "$output_file"
- ```
-
- This will download a CSV with all raw results. Alternatively you can remove
- the port argument `-p` in `mako-stub` container to dump the output to
- container log directly.
-
-## Available benchmarks
-
-- `direct`: Source -> Sink (baseline test)
-- `broker-imc`: Source -> Broker with IMC -> Sink
-- `channel-imc`: Source -> IMC -> Sink
-
-## Plotting results from mako-stub
-
-In order to plot results from the mako-stub, you need to have installed
-`gnuplot`.
-
-Three plot scripts are available:
-
-- Only send/receive latencies
-- Only send/receive throughput
-- Combined send/receive throughput
-
-To use them, you need to pass as first parameter the csv. If you want to use the
-combined plot script, you need to specify also latency upper bound, thpt lower
-and upper bound to show. For example:
-
-```
-gnuplot -c test/performance/latency-and-thpt-plot.plg data.csv 0.5 0 1100
-```
-
-> * `0.5` is the time in seconds, and it is the max allowed size for the y1 axis
-> * `0` and `1100` are the message throughput, and it they represent the min and max boundaries of the y2 axis
-
-## Profiling
-
-Most eventing binaries under `cmd` package are bootstrapped by either
-`sharedmain.Main` in `knative.dev/pkg/injection/sharedmain` or `adapter.Main` in
-`knative.dev/eventing/pkg/adapter`. These `Main` helper functions uses the
-[profiling](https://github.com/knative/pkg/blob/main/profiling/server.go)
-package to enable golang profiling by reading the `profiling.enable` flag in the
-`config-observability` configmap.
-
-To enable profiling,
-
-1. Add or modify `profiling.enable: "true"` in
- `config/config-observability.yaml`'s `data` field and apply the change. Or
- use `kubectl edit configmap -n knative-eventing config-observability`.
-2. Port forward into the pod which you want to profile, e.g.,
- `kubectl port-forward 8008:8008`
-3. Point your browser to `http://localhost:8008/debug/pprof/` and view pprof
- data.
-
-After you are done, you can disable profiling by setting
-`profiling.enable: "false"`.
diff --git a/test/performance/benchmarks/broker-imc/100-broker-perf-setup.yaml b/test/performance/benchmarks/broker-imc/100-broker-perf-setup.yaml
deleted file mode 100644
index 82a9386cc73..00000000000
--- a/test/performance/benchmarks/broker-imc/100-broker-perf-setup.yaml
+++ /dev/null
@@ -1,174 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Namespace
-metadata:
- name: perf-eventing
-
----
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: config-broker
- namespace: perf-eventing
-data:
- channel-template-spec: |
- apiVersion: messaging.knative.dev/v1
- kind: InMemoryChannel
-
----
-
-apiVersion: eventing.knative.dev/v1
-kind: Broker
-metadata:
- name: in-memory-test-broker
- namespace: perf-eventing
- annotations:
- eventing.knative.dev/broker.class: MTChannelBasedBroker
-spec:
- config:
- apiVersion: v1
- kind: ConfigMap
- name: config-broker
-
----
-
-apiVersion: eventing.knative.dev/v1
-kind: Trigger
-metadata:
- name: broker-perf
- namespace: perf-eventing
-spec:
- broker: in-memory-test-broker
- subscriber:
- ref:
- apiVersion: v1
- kind: Service
- name: broker-perf-consumer
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: eventing-broker-ingress
- namespace: perf-eventing
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: eventing-broker-filter
- namespace: perf-eventing
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: perf-eventing
- namespace: perf-eventing
-
----
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: perf-eventing
-rules:
- - apiGroups: [""]
- resources: ["nodes", "pods"]
- verbs: ["list"]
-
----
-
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: perf-eventing
-subjects:
- - kind: ServiceAccount
- name: perf-eventing
- namespace: perf-eventing
-roleRef:
- kind: ClusterRole
- name: perf-eventing
- apiGroup: rbac.authorization.k8s.io
-
----
-
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: eventing-broker-ingress
- namespace: perf-eventing
-subjects:
- - kind: ServiceAccount
- name: eventing-broker-ingress
- namespace: perf-eventing
-roleRef:
- kind: ClusterRole
- name: eventing-broker-ingress
- apiGroup: rbac.authorization.k8s.io
-
----
-
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
- name: eventing-broker-filter
- namespace: perf-eventing
-subjects:
- - kind: ServiceAccount
- name: eventing-broker-filter
- namespace: perf-eventing
-roleRef:
- kind: ClusterRole
- name: eventing-broker-filter
- apiGroup: rbac.authorization.k8s.io
-
----
-
-apiVersion: v1
-kind: Service
-metadata:
- name: broker-perf-consumer
- namespace: perf-eventing
-spec:
- selector:
- role: broker-perf-consumer
- ports:
- - name: http
- port: 80
- targetPort: cloudevents
- protocol: TCP
-
----
-
-apiVersion: v1
-kind: Service
-metadata:
- name: broker-perf-aggregator
- namespace: perf-eventing
-spec:
- selector:
- role: broker-perf-aggregator
- ports:
- - name: grpc
- port: 10000
- targetPort: grpc
- protocol: TCP
diff --git a/test/performance/benchmarks/broker-imc/200-broker-imc-continuous-load-setup.yaml b/test/performance/benchmarks/broker-imc/200-broker-imc-continuous-load-setup.yaml
deleted file mode 100644
index 7f56e8afb48..00000000000
--- a/test/performance/benchmarks/broker-imc/200-broker-imc-continuous-load-setup.yaml
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: batch/v1
-kind: CronJob
-metadata:
- name: broker-imc-sender-receiver
- namespace: perf-eventing
-spec:
- schedule: "0/15 * * * *"
- # History must be zero to ensure no failed pods stick around and block the next job
- successfulJobsHistoryLimit: 0
- failedJobsHistoryLimit: 0
- jobTemplate:
- spec:
- completions: 1
- parallelism: 1
- backoffLimit: 0
- # Allow up to 14 minutes, then clean up to make room for the next attempt
- activeDeadlineSeconds: 840
- template:
- metadata:
- labels:
- role: broker-imc-receiver
- spec:
- serviceAccountName: perf-eventing
- restartPolicy: Never
- containers:
- - name: sender
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=sender"
- - "--sink=http://broker-ingress.knative-eventing.svc.cluster.local/perf-eventing/in-memory-test-broker"
- - "--aggregator=broker-imc-aggregator:10000"
- - "--pace=100:10,400:20,800:30,900:60,1000:60,1100:60,1200:60"
- env:
- - name: GOGC
- value: "off"
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- resources:
- requests:
- cpu: 1200m
- memory: 3Gi
- - name: receiver
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=receiver"
- - "--aggregator=broker-imc-aggregator:10000"
- - "--pace=100:10,400:20,800:30,900:60,1000:60,1100:60,1200:60"
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- resources:
- requests:
- cpu: 1200m
- memory: 3Gi
- ports:
- - name: cloudevents
- containerPort: 8080
-
----
-
-apiVersion: batch/v1
-kind: CronJob
-metadata:
- name: broker-imc-aggregator
- namespace: perf-eventing
-spec:
- schedule: "0/15 * * * *"
- # History must be zero to ensure no failed pods stick around and block the next job
- successfulJobsHistoryLimit: 0
- failedJobsHistoryLimit: 0
- jobTemplate:
- spec:
- completions: 1
- parallelism: 1
- backoffLimit: 0
- # Allow up to 14 minutes, then clean up to make room for the next attempt
- activeDeadlineSeconds: 840
- template:
- metadata:
- labels:
- role: broker-imc-aggregator
- spec:
- serviceAccountName: perf-eventing
- restartPolicy: Never
- containers:
- - name: aggregator
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=aggregator"
- # set to the number of sender + receiver (same image that does both counts 2)
- - "--expect-records=2"
- - "--mako-tags=channel=imc"
- env:
- - name: GOGC
- value: "off"
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- ports:
- - name: grpc
- containerPort: 10000
- resources:
- requests:
- cpu: 1000m
- memory: 2Gi
- volumeMounts:
- - name: config-mako
- mountPath: /etc/config-mako
- - name: mako-secrets
- mountPath: /var/secret
- terminationMessagePolicy: FallbackToLogsOnError
- - name: mako
- image: gcr.io/knative-tests/test-infra/mako-microservice:latest
- env:
- - name: GOOGLE_APPLICATION_CREDENTIALS
- value: /var/secret/robot.json
- volumeMounts:
- - name: mako-secrets
- mountPath: /var/secret
- ports:
- - name: quickstore
- containerPort: 9813
- terminationMessagePolicy: FallbackToLogsOnError
- volumes:
- - name: config-mako
- configMap:
- name: config-mako
- - name: mako-secrets
- secret:
- secretName: mako-secrets
diff --git a/test/performance/benchmarks/broker-imc/300-broker-imc-increasing-load-setup.yaml b/test/performance/benchmarks/broker-imc/300-broker-imc-increasing-load-setup.yaml
deleted file mode 100644
index 6a53dd6d916..00000000000
--- a/test/performance/benchmarks/broker-imc/300-broker-imc-increasing-load-setup.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: batch/v1
-kind: Job
-metadata:
- name: broker-perf-send-receive
- namespace: perf-eventing
- labels:
- role: broker-perf-consumer
-spec:
- completions: 1
- parallelism: 1
- backoffLimit: 0
- template:
- metadata:
- labels:
- role: broker-perf-consumer
- spec:
- serviceAccountName: perf-eventing
- restartPolicy: Never
- containers:
- - name: sender-receiver
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=sender,receiver"
- - "--sink=http://broker-ingress.knative-eventing.svc.cluster.local/perf-eventing/in-memory-test-broker"
- - "--aggregator=broker-perf-aggregator:10000"
- - "--pace=500:30,600:30,700:30,800:30,900:30,1000:30"
- env:
- - name: GOGC
- value: "off"
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- resources:
- requests:
- cpu: 1000m
- memory: 2Gi
- ports:
- - name: cloudevents
- containerPort: 8080
-
----
-
-apiVersion: v1
-kind: Pod
-metadata:
- name: broker-perf-aggregator
- namespace: perf-eventing
- labels:
- role: broker-perf-aggregator
-spec:
- serviceAccountName: perf-eventing
- restartPolicy: Never
- containers:
- - name: aggregator
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=aggregator"
- # set to the number of sender + receiver (same image that does both counts 2)
- - "--expect-records=2"
- env:
- - name: GOGC
- value: "off"
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- ports:
- - name: grpc
- containerPort: 10000
- resources:
- requests:
- cpu: 1000m
- memory: 2Gi
- volumeMounts:
- - name: config-mako
- mountPath: /etc/config-mako
- terminationMessagePolicy: FallbackToLogsOnError
- - name: mako-stub
- image: ko://knative.dev/pkg/test/mako/stub-sidecar
- args:
- - "-p=10001"
- ports:
- - name: quickstore
- containerPort: 9813
- volumeMounts:
- - name: config-mako
- mountPath: /etc/config-mako
- terminationMessagePolicy: FallbackToLogsOnError
- resources:
- requests:
- memory: 4Gi
- volumes:
- - name: config-mako
- configMap:
- name: config-mako
diff --git a/test/performance/benchmarks/broker-imc/cluster.yaml b/test/performance/benchmarks/broker-imc/cluster.yaml
deleted file mode 100644
index abb83cea95a..00000000000
--- a/test/performance/benchmarks/broker-imc/cluster.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Configuration file for the cluster that runs this benchmark continuously.
-
-GKECluster:
- location: "us-central1"
- nodeCount: 1
- nodeType: "e2-standard-4"
- addons: "HorizontalPodAutoscaling,HttpLoadBalancing"
diff --git a/test/performance/benchmarks/broker-imc/dev.config b/test/performance/benchmarks/broker-imc/dev.config
deleted file mode 100644
index 0707193950f..00000000000
--- a/test/performance/benchmarks/broker-imc/dev.config
+++ /dev/null
@@ -1,70 +0,0 @@
-# Create this benchmark with the mako tool: mako create_benchmark dev.config
-# Update this benchmark with the mako tool: mako update_benchmark dev.config
-# Learn more about the mako tool at
-# https://github.com/google/mako/blob/master/docs/CLI.md.
-
-project_name: "Knative"
-benchmark_name: "Development - IMC Broker Latency & Throughput"
-description: "Measure latency and throughput of the broker using various channels."
-benchmark_key: '6552586517741568'
-
-# Human owners that can update the benchmark.
-owner_list: "grantrodgers@google.com"
-owner_list: "chizhg@google.com"
-owner_list: "xiyue@google.com"
-owner_list: "gracegao@google.com"
-owner_list: "nachocano@google.com"
-owner_list: "cshou@google.com"
-owner_list: "ngiraldo@google.com"
-owner_list: "zhongduo@google.com"
-
-# GCP Service Accounts that can publish data to Mako.
-owner_list: "mako-job@knative-performance.iam.gserviceaccount.com"
-owner_list: "mako-upload@grantrodgers-crd.iam.gserviceaccount.com"
-owner_list: "mako-upload@xiyue-knative-project.iam.gserviceaccount.com"
-owner_list: "mako-upload@gracegao-knative-gcp-testing.iam.gserviceaccount.com"
-owner_list: "mako-upload@knative-project-228222.iam.gserviceaccount.com"
-owner_list: "mako-upload@cshou-playground.iam.gserviceaccount.com"
-owner_list: "mako-upload@ngiraldo-knative-dev.iam.gserviceaccount.com"
-owner_list: "mako-upload@jimmy-knative-dev.iam.gserviceaccount.com"
-
-# Define the name and type for x-axis of run charts
-input_value_info: {
- value_key: "t"
- label: "time"
- type: TIMESTAMP
-}
-
-# Note: value_key is stored repeatedly and should be very short (ideally one or two characters).
-metric_info_list: {
- value_key: "pl"
- label: "publish-latency"
-}
-metric_info_list: {
- value_key: "pe"
- label: "publish-errors"
-}
-metric_info_list: {
- value_key: "st"
- label: "send-throughput"
-}
-metric_info_list: {
- value_key: "dl"
- label: "deliver-latency"
-}
-metric_info_list: {
- value_key: "de"
- label: "deliver-errors"
-}
-metric_info_list: {
- value_key: "dt"
- label: "deliver-throughput"
-}
-metric_info_list: {
- value_key: "pet"
- label: "publish-failure-throughput"
-}
-metric_info_list: {
- value_key: "det"
- label: "deliver-failure-throughput"
-}
diff --git a/test/performance/benchmarks/broker-imc/prod.config b/test/performance/benchmarks/broker-imc/prod.config
deleted file mode 100644
index 17758c711fe..00000000000
--- a/test/performance/benchmarks/broker-imc/prod.config
+++ /dev/null
@@ -1,64 +0,0 @@
-# Create this benchmark with the mako tool: mako create_benchmark prod.config
-# Update this benchmark with the mako tool: mako update_benchmark prod.config
-# Learn more about the mako tool at
-# https://github.com/google/mako/blob/master/docs/CLI.md.
-
-project_name: "Knative"
-benchmark_name: "IMC Broker Latency & Throughput"
-description: "Measure latency and throughput of the broker using various channels."
-benchmark_key: '5903682180743168'
-
-# Human owners that can update the benchmark.
-owner_list: "grantrodgers@google.com"
-owner_list: "chizhg@google.com"
-owner_list: "xiyue@google.com"
-owner_list: "gracegao@google.com"
-owner_list: "nachocano@google.com"
-owner_list: "cshou@google.com"
-owner_list: "ngiraldo@google.com"
-owner_list: "zhongduo@google.com"
-
-# GCP Service Accounts that can publish data to Mako. Since this is a prod
-# benchmark, only the CI account should be listed here.
-owner_list: "mako-job@knative-performance.iam.gserviceaccount.com"
-
-# Define the name and type for x-axis of run charts
-input_value_info: {
- value_key: "t"
- label: "time"
- type: TIMESTAMP
-}
-
-# Note: value_key is stored repeatedly and should be very short (ideally one or two characters).
-metric_info_list: {
- value_key: "pl"
- label: "publish-latency"
-}
-metric_info_list: {
- value_key: "pe"
- label: "publish-errors"
-}
-metric_info_list: {
- value_key: "st"
- label: "send-throughput"
-}
-metric_info_list: {
- value_key: "dl"
- label: "deliver-latency"
-}
-metric_info_list: {
- value_key: "de"
- label: "deliver-errors"
-}
-metric_info_list: {
- value_key: "dt"
- label: "deliver-throughput"
-}
-metric_info_list: {
- value_key: "pet"
- label: "publish-failure-throughput"
-}
-metric_info_list: {
- value_key: "det"
- label: "deliver-failure-throughput"
-}
diff --git a/test/performance/benchmarks/channel-imc/100-channel-perf-setup.yaml b/test/performance/benchmarks/channel-imc/100-channel-perf-setup.yaml
deleted file mode 100644
index 6531ef0c945..00000000000
--- a/test/performance/benchmarks/channel-imc/100-channel-perf-setup.yaml
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Namespace
-metadata:
- name: perf-eventing
-
----
-
-apiVersion: v1
-kind: Service
-metadata:
- name: channel-perf-consumer
- namespace: perf-eventing
-spec:
- selector:
- role: channel-perf-consumer
- ports:
- - protocol: TCP
- port: 80
- targetPort: cloudevents
- name: http
-
----
-
-apiVersion: v1
-kind: Service
-metadata:
- name: channel-perf-aggregator
- namespace: perf-eventing
-spec:
- selector:
- role: channel-perf-aggregator
- ports:
- - name: grpc
- port: 10000
- targetPort: grpc
- protocol: TCP
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: perf-eventing
- namespace: perf-eventing
-
----
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: perf-eventing
-rules:
- - apiGroups: [""]
- resources: ["nodes", "pods"]
- verbs: ["list"]
-
----
-
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: perf-eventing
-subjects:
- - kind: ServiceAccount
- name: perf-eventing
- namespace: perf-eventing
-roleRef:
- kind: ClusterRole
- name: perf-eventing
- apiGroup: rbac.authorization.k8s.io
-
----
-
-apiVersion: messaging.knative.dev/v1
-kind: InMemoryChannel
-metadata:
- name: in-memory-test-channel
- namespace: perf-eventing
-
----
-
-
-apiVersion: messaging.knative.dev/v1
-kind: Subscription
-metadata:
- name: in-memory-test-channel-sub
- namespace: perf-eventing
-spec:
- channel:
- apiVersion: messaging.knative.dev/v1
- kind: InMemoryChannel
- name: in-memory-test-channel
- subscriber:
- uri: "http://channel-perf-consumer.perf-eventing.svc.cluster.local"
diff --git a/test/performance/benchmarks/channel-imc/200-channel-imc-continuous-load-setup.yaml b/test/performance/benchmarks/channel-imc/200-channel-imc-continuous-load-setup.yaml
deleted file mode 100644
index 791f46d3c0b..00000000000
--- a/test/performance/benchmarks/channel-imc/200-channel-imc-continuous-load-setup.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: batch/v1
-kind: CronJob
-metadata:
- name: channel-imc-sender-receiver
- namespace: perf-eventing
-spec:
- schedule: "0/15 * * * *"
- # History must be zero to ensure no failed pods stick around and block the next job
- successfulJobsHistoryLimit: 0
- failedJobsHistoryLimit: 0
- jobTemplate:
- spec:
- completions: 1
- parallelism: 1
- backoffLimit: 0
- # Allow up to 14 minutes, then clean up to make room for the next attempt
- activeDeadlineSeconds: 840
- template:
- metadata:
- labels:
- role: channel-imc-consumer
- spec:
- serviceAccountName: perf-eventing
- restartPolicy: Never
- containers:
- - name: sender
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=sender"
- - "--aggregator=channel-imc-aggregator:10000"
- - "--sink=http://channel-imc-kn-channel.perf-eventing.svc.cluster.local"
- - "--pace=100:10,400:20,800:30,900:60,1000:60,1100:60,1200:60,1500:60"
- env:
- - name: GOGC
- value: "off"
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- resources:
- requests:
- cpu: 1200m
- memory: 3Gi
- - name: receiver
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=receiver"
- - "--aggregator=channel-imc-aggregator:10000"
- - "--pace=100:10,400:20,800:30,900:60,1000:60,1100:60,1200:60,1500:60"
- env:
- - name: GOGC
- value: "off"
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- resources:
- requests:
- cpu: 1200m
- memory: 3Gi
- ports:
- - name: cloudevents
- containerPort: 8080
-
----
-
-apiVersion: batch/v1
-kind: CronJob
-metadata:
- name: channel-imc-aggregator
- namespace: perf-eventing
-spec:
- schedule: "0/15 * * * *"
- # History must be zero to ensure no failed pods stick around and block the next job
- successfulJobsHistoryLimit: 0
- failedJobsHistoryLimit: 0
- jobTemplate:
- spec:
- completions: 1
- parallelism: 1
- backoffLimit: 0
- # Allow up to 14 minutes, then clean up to make room for the next attempt
- activeDeadlineSeconds: 840
- template:
- metadata:
- labels:
- role: channel-imc-aggregator
- spec:
- serviceAccountName: perf-eventing
- restartPolicy: Never
- containers:
- - name: aggregator
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=aggregator"
- # set to the number of sender + receiver (same image that does both counts 2)
- - "--expect-records=2"
- - "--mako-tags=channel=imc"
- ports:
- - name: grpc
- containerPort: 10000
- resources:
- requests:
- cpu: 1000m
- memory: 2Gi
- volumeMounts:
- - name: config-mako
- mountPath: /etc/config-mako
- - name: mako-secrets
- mountPath: /var/secret
- terminationMessagePolicy: FallbackToLogsOnError
- - name: mako
- image: gcr.io/knative-tests/test-infra/mako-microservice:latest
- env:
- - name: GOOGLE_APPLICATION_CREDENTIALS
- value: /var/secret/robot.json
- volumeMounts:
- - name: mako-secrets
- mountPath: /var/secret
- ports:
- - name: quickstore
- containerPort: 9813
- terminationMessagePolicy: FallbackToLogsOnError
- volumes:
- - name: config-mako
- configMap:
- name: config-mako
- - name: mako-secrets
- secret:
- secretName: mako-secrets
diff --git a/test/performance/benchmarks/channel-imc/300-channel-imc-constant-load-setup.yaml b/test/performance/benchmarks/channel-imc/300-channel-imc-constant-load-setup.yaml
deleted file mode 100644
index 48b91c5552b..00000000000
--- a/test/performance/benchmarks/channel-imc/300-channel-imc-constant-load-setup.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: batch/v1
-kind: Job
-metadata:
- name: channel-perf-send-receive
- namespace: perf-eventing
- labels:
- role: channel-perf-consumer
-spec:
- completions: 1
- parallelism: 1
- backoffLimit: 0
- template:
- metadata:
- labels:
- role: channel-perf-consumer
- spec:
- serviceAccountName: perf-eventing
- restartPolicy: Never
- containers:
- - name: sender-receiver
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=sender,receiver"
- - "--sink=http://in-memory-test-channel-kn-channel.perf-eventing.svc.cluster.local"
- - "--aggregator=channel-perf-aggregator:10000"
- - "--pace=1000:30"
- - "--warmup=0"
- env:
- - name: GOGC
- value: "off"
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- resources:
- requests:
- cpu: 1000m
- memory: 6Gi
- ports:
- - name: cloudevents
- containerPort: 8080
-
----
-
-apiVersion: v1
-kind: Pod
-metadata:
- name: channel-perf-aggregator
- namespace: perf-eventing
- labels:
- role: channel-perf-aggregator
-spec:
- serviceAccountName: perf-eventing
- restartPolicy: Never
- containers:
- - name: aggregator
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=aggregator"
- # set to the number of sender + receiver (same image that does both counts 2)
- - "--expect-records=2"
- ports:
- - name: grpc
- containerPort: 10000
- env:
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- resources:
- requests:
- cpu: 1000m
- memory: 2Gi
- volumeMounts:
- - name: config-mako
- mountPath: /etc/config-mako
- terminationMessagePolicy: FallbackToLogsOnError
- - name: mako-stub
- image: ko://knative.dev/pkg/test/mako/stub-sidecar
- args:
- - "-p=10001"
- ports:
- - name: quickstore
- containerPort: 9813
- - name: results
- containerPort: 10001
- volumeMounts:
- - name: config-mako
- mountPath: /etc/config-mako
- terminationMessagePolicy: FallbackToLogsOnError
- volumes:
- - name: config-mako
- configMap:
- name: config-mako
diff --git a/test/performance/benchmarks/channel-imc/cluster.yaml b/test/performance/benchmarks/channel-imc/cluster.yaml
deleted file mode 100644
index abb83cea95a..00000000000
--- a/test/performance/benchmarks/channel-imc/cluster.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Configuration file for the cluster that runs this benchmark continuously.
-
-GKECluster:
- location: "us-central1"
- nodeCount: 1
- nodeType: "e2-standard-4"
- addons: "HorizontalPodAutoscaling,HttpLoadBalancing"
diff --git a/test/performance/benchmarks/channel-imc/dev.config b/test/performance/benchmarks/channel-imc/dev.config
deleted file mode 100644
index 7d908cea74b..00000000000
--- a/test/performance/benchmarks/channel-imc/dev.config
+++ /dev/null
@@ -1,70 +0,0 @@
-# Create this benchmark with the mako tool: mako create_benchmark dev.config
-# Update this benchmark with the mako tool: mako update_benchmark dev.config
-# Learn more about the mako tool at
-# https://github.com/google/mako/blob/master/docs/CLI.md.
-
-project_name: "Knative"
-benchmark_name: "Development - IMC Channel Latency & Throughput"
-description: "Measure latency and throughput of channels."
-benchmark_key: '4926580618952704'
-
-# Human owners that can update the benchmark.
-owner_list: "grantrodgers@google.com"
-owner_list: "chizhg@google.com"
-owner_list: "xiyue@google.com"
-owner_list: "gracegao@google.com"
-owner_list: "nachocano@google.com"
-owner_list: "cshou@google.com"
-owner_list: "ngiraldo@google.com"
-owner_list: "zhongduo@google.com"
-
-# GCP Service Accounts that can publish data to Mako.
-owner_list: "mako-job@knative-performance.iam.gserviceaccount.com"
-owner_list: "mako-upload@grantrodgers-crd.iam.gserviceaccount.com"
-owner_list: "mako-upload@xiyue-knative-project.iam.gserviceaccount.com"
-owner_list: "mako-upload@gracegao-knative-gcp-testing.iam.gserviceaccount.com"
-owner_list: "mako-upload@knative-project-228222.iam.gserviceaccount.com"
-owner_list: "mako-upload@cshou-playground.iam.gserviceaccount.com"
-owner_list: "mako-upload@ngiraldo-knative-dev.iam.gserviceaccount.com"
-owner_list: "mako-upload@jimmy-knative-dev.iam.gserviceaccount.com"
-
-# Define the name and type for x-axis of run charts
-input_value_info: {
- value_key: "t"
- label: "time"
- type: TIMESTAMP
-}
-
-# Note: value_key is stored repeatedly and should be very short (ideally one or two characters).
-metric_info_list: {
- value_key: "pl"
- label: "publish-latency"
-}
-metric_info_list: {
- value_key: "pe"
- label: "publish-errors"
-}
-metric_info_list: {
- value_key: "st"
- label: "send-throughput"
-}
-metric_info_list: {
- value_key: "dl"
- label: "deliver-latency"
-}
-metric_info_list: {
- value_key: "de"
- label: "deliver-errors"
-}
-metric_info_list: {
- value_key: "dt"
- label: "deliver-throughput"
-}
-metric_info_list: {
- value_key: "pet"
- label: "publish-failure-throughput"
-}
-metric_info_list: {
- value_key: "det"
- label: "deliver-failure-throughput"
-}
diff --git a/test/performance/benchmarks/channel-imc/prod.config b/test/performance/benchmarks/channel-imc/prod.config
deleted file mode 100644
index c9eef5d76b5..00000000000
--- a/test/performance/benchmarks/channel-imc/prod.config
+++ /dev/null
@@ -1,64 +0,0 @@
-# Create this benchmark with the mako tool: mako create_benchmark prod.config
-# Update this benchmark with the mako tool: mako update_benchmark prod.config
-# Learn more about the mako tool at
-# https://github.com/google/mako/blob/master/docs/CLI.md.
-
-project_name: "Knative"
-benchmark_name: "IMC Channel Latency & Throughput"
-description: "Measure latency and throughput of channels."
-benchmark_key: '5683818216292352'
-
-# Human owners that can update the benchmark.
-owner_list: "grantrodgers@google.com"
-owner_list: "chizhg@google.com"
-owner_list: "xiyue@google.com"
-owner_list: "gracegao@google.com"
-owner_list: "nachocano@google.com"
-owner_list: "cshou@google.com"
-owner_list: "ngiraldo@google.com"
-owner_list: "zhongduo@google.com"
-
-# GCP Service Accounts that can publish data to Mako. Since this is a prod
-# benchmark, only the CI account should be listed here.
-owner_list: "mako-job@knative-performance.iam.gserviceaccount.com"
-
-# Define the name and type for x-axis of run charts
-input_value_info: {
- value_key: "t"
- label: "time"
- type: TIMESTAMP
-}
-
-# Note: value_key is stored repeatedly and should be very short (ideally one or two characters).
-metric_info_list: {
- value_key: "pl"
- label: "publish-latency"
-}
-metric_info_list: {
- value_key: "pe"
- label: "publish-errors"
-}
-metric_info_list: {
- value_key: "st"
- label: "send-throughput"
-}
-metric_info_list: {
- value_key: "dl"
- label: "deliver-latency"
-}
-metric_info_list: {
- value_key: "de"
- label: "deliver-errors"
-}
-metric_info_list: {
- value_key: "dt"
- label: "deliver-throughput"
-}
-metric_info_list: {
- value_key: "pet"
- label: "publish-failure-throughput"
-}
-metric_info_list: {
- value_key: "det"
- label: "deliver-failure-throughput"
-}
diff --git a/test/performance/config/config-mako.yaml b/test/performance/config/config-mako.yaml
deleted file mode 100644
index 57f1da2ea97..00000000000
--- a/test/performance/config/config-mako.yaml
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: config-mako
-
-data:
- _example: |
- ################################
- # #
- # EXAMPLE CONFIGURATION #
- # #
- ################################
-
- # This block is not actually functional configuration,
- # but serves to illustrate the available configuration
- # options and document them in a way that is accessible
- # to users that `kubectl edit` this config map.
- #
- # These sample configuration options may be copied out of
- # this example block and unindented to be in the data block
- # to actually change the configuration.
-
- # The Mako environment in which we are running.
- # Only our performance automation should run in "prod", but
- # there should be a "dev" environment with a fairly broad
- # write ACL. Users can also develop against custom configurations
- # by adding `foo.config` under their benchmark's kodata directory.
- environment: dev
-
- # Additional tags to tag the runs. These tags are added
- # to the list that the binary itself publishes (Kubernetes version, etc).
- # It is a comma separated list of tags.
- additionalTags: "key=value,absolute"
-
- # Replace the value with config of the benchmark you want to run
- dev.config: |
- ### Creating this benchmark:
- # mako create_benchmark test/performance/channel-imc/dev.config
- ### Updating this benchmark:
- # mako update_benchmark test/performance/channel-imc/dev.config
-
- project_name: "Knative"
- benchmark_name: "Development - IMC Channel Latency & Throughput"
- description: "Measure latency and throughput of channels."
- benchmark_key: '4926580618952704'
-
- # Human owners for manual benchmark adjustments.
- owner_list: "grantrodgers@google.com"
- owner_list: "chizhg@google.com"
- owner_list: "xiyue@google.com"
- owner_list: "gracegao@google.com"
- owner_list: "nachocano@google.com"
- owner_list: "cshou@google.com"
- owner_list: "ngiraldo@google.com"
-
- # Anyone can add their IAM robot here to publish to this benchmark.
- owner_list: "mako-job@knative-performance.iam.gserviceaccount.com"
- # This is grantrodgers' robot:
- owner_list: "mako-upload@grantrodgers-crd.iam.gserviceaccount.com"
- owner_list: "mako-upload@xiyue-knative-project.iam.gserviceaccount.com"
- owner_list: "mako-upload@gracegao-knative-gcp-testing.iam.gserviceaccount.com"
- owner_list: "mako-upload@knative-project-228222.iam.gserviceaccount.com"
- owner_list: "mako-upload@cshou-playground.iam.gserviceaccount.com"
- owner_list: "mako-upload@ngiraldo-knative-dev.iam.gserviceaccount.com"
-
- # Define the name and type for x-axis of run charts
- input_value_info: {
- value_key: "t"
- label: "time"
- type: TIMESTAMP
- }
-
- # Note: value_key is stored repeatedly and should be very short (ideally one or two characters).
- metric_info_list: {
- value_key: "pl"
- label: "publish-latency"
- }
- metric_info_list: {
- value_key: "pe"
- label: "publish-errors"
- }
- metric_info_list: {
- value_key: "st"
- label: "send-throughput"
- }
- metric_info_list: {
- value_key: "dl"
- label: "deliver-latency"
- }
- metric_info_list: {
- value_key: "de"
- label: "deliver-errors"
- }
- metric_info_list: {
- value_key: "dt"
- label: "deliver-throughput"
- }
- metric_info_list: {
- value_key: "ft"
- label: "failure-throughput"
- }
diff --git a/test/performance/direct/100-direct-perf-setup.yaml b/test/performance/direct/100-direct-perf-setup.yaml
deleted file mode 100644
index 623b0dac28e..00000000000
--- a/test/performance/direct/100-direct-perf-setup.yaml
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: Namespace
-metadata:
- name: perf-eventing
-
----
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: config-mako
- namespace: perf-eventing
-data:
- _example: |
- ################################
- # #
- # EXAMPLE CONFIGURATION #
- # #
- ################################
-
- # This block is not actually functional configuration,
- # but serves to illustrate the available configuration
- # options and document them in a way that is accessible
- # to users that `kubectl edit` this config map.
- #
- # These sample configuration options may be copied out of
- # this example block and unindented to be in the data block
- # to actually change the configuration.
-
- # The Mako environment in which we are running.
- # Only our performance automation should run in "prod", but
- # there should be a "dev" environment with a fairly broad
- # write ACL. Users can also develop against custom configurations
- # by adding `foo.config` under their benchmark's kodata directory.
- environment: dev
-
- # Additional tags to tag the runs. These tags are added
- # to the list that the binary itself publishes (Kubernetes version, etc).
- # It is a comma separated list of tags.
- additionalTags: "key=value,absolute"
- dev.config: |
- ### Creating this benchmark:
- # mako create_benchmark test/performance/broker-latency/prod.config
- ### Updating this benchmark:
- # mako update_benchmark test/performance/broker-latency/prod.config
-
- project_name: "Knative"
- benchmark_name: "Broker Latency & Throughput"
- description: "Measure latency and throughput of the broker using various channels."
- benchmark_key: '5036156928393216'
-
- # Human owners for manual benchmark adjustments.
- owner_list: "grantrodgers@google.com"
- owner_list: "chizhg@google.com"
-
- # Only this robot should publish data to Mako for this key!
- owner_list: "mako-job@knative-eventing-performance.iam.gserviceaccount.com"
-
- # Define the name and type for x-axis of run charts
- input_value_info: {
- value_key: "t"
- label: "time"
- type: TIMESTAMP
- }
-
- # Note: value_key is stored repeatedly and should be very short (ideally one or two characters).
- metric_info_list: {
- value_key: "pl"
- label: "publish-latency"
- }
- metric_info_list: {
- value_key: "pe"
- label: "publish-errors"
- }
- metric_info_list: {
- value_key: "st"
- label: "send-throughput"
- }
- metric_info_list: {
- value_key: "dl"
- label: "deliver-latency"
- }
- metric_info_list: {
- value_key: "de"
- label: "deliver-errors"
- }
- metric_info_list: {
- value_key: "dt"
- label: "deliver-throughput"
- }
- metric_info_list: {
- value_key: "pet"
- label: "publish-failure-throughput"
- }
- metric_info_list: {
- value_key: "det"
- label: "deliver-failure-throughput"
- }
-
----
-
-apiVersion: v1
-kind: Service
-metadata:
- name: direct-perf-consumer
- namespace: perf-eventing
-spec:
- selector:
- role: direct-perf-consumer
- ports:
- - protocol: TCP
- port: 80
- targetPort: cloudevents
- name: http
-
----
-
-apiVersion: v1
-kind: Service
-metadata:
- name: direct-perf-aggregator
- namespace: perf-eventing
-spec:
- selector:
- role: direct-perf-aggregator
- ports:
- - name: grpc
- port: 10000
- targetPort: grpc
- protocol: TCP
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: perf-eventing
- namespace: perf-eventing
-
----
-
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: perf-eventing
-rules:
- - apiGroups: [""]
- resources: ["nodes", "pods"]
- verbs: ["list"]
-
----
-
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: perf-eventing
-subjects:
- - kind: ServiceAccount
- name: perf-eventing
- namespace: perf-eventing
-roleRef:
- kind: ClusterRole
- name: perf-eventing
- apiGroup: rbac.authorization.k8s.io
diff --git a/test/performance/direct/200-direct-perf.yaml b/test/performance/direct/200-direct-perf.yaml
deleted file mode 100644
index 5ab66c9d42b..00000000000
--- a/test/performance/direct/200-direct-perf.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2019 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: batch/v1
-kind: Job
-metadata:
- name: direct-perf-send-receive
- namespace: perf-eventing
- labels:
- role: direct-perf-consumer
-spec:
- completions: 1
- parallelism: 1
- backoffLimit: 0
- template:
- metadata:
- labels:
- role: direct-perf-consumer
- spec:
- serviceAccountName: perf-eventing
- restartPolicy: Never
- containers:
- - name: sender-receiver
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=sender,receiver"
- - "--sink=http://direct-perf-consumer.perf-eventing.svc.cluster.local"
- - "--aggregator=direct-perf-aggregator:10000"
- - "--pace=500:30,600:30,700:30,800:30,900:30,1000:30"
- - "--warmup=0"
- env:
- - name: GOGC
- value: "off"
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- resources:
- requests:
- cpu: 1000m
- memory: 6Gi
- ports:
- - name: cloudevents
- containerPort: 8080
-
----
-
-apiVersion: v1
-kind: Pod
-metadata:
- name: direct-perf-aggregator
- namespace: perf-eventing
- labels:
- role: direct-perf-aggregator
-spec:
- serviceAccountName: perf-eventing
- restartPolicy: Never
- containers:
- - name: aggregator
- image: ko://knative.dev/eventing/test/test_images/performance
- args:
- - "--roles=aggregator"
- # set to the number of sender + receiver (same image that does both counts 2)
- - "--expect-records=2"
- ports:
- - name: grpc
- containerPort: 10000
- resources:
- requests:
- cpu: 1000m
- memory: 2Gi
- env:
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- volumeMounts:
- - name: config-mako
- mountPath: /etc/config-mako
- terminationMessagePolicy: FallbackToLogsOnError
- - name: mako-stub
- image: ko://knative.dev/pkg/test/mako/stub-sidecar
- args:
- - "-p=10001"
- ports:
- - name: quickstore
- containerPort: 9813
- - name: results
- containerPort: 10001
- volumeMounts:
- - name: config-mako
- mountPath: /etc/config-mako
- terminationMessagePolicy: FallbackToLogsOnError
- volumes:
- - name: config-mako
- configMap:
- name: config-mako
diff --git a/test/performance/infra/OWNERS b/test/performance/infra/OWNERS
deleted file mode 100644
index 6dd1e299bf0..00000000000
--- a/test/performance/infra/OWNERS
+++ /dev/null
@@ -1,12 +0,0 @@
-# The OWNERS file is used by prow to automatically merge approved PRs.
-
-approvers:
-- productivity-writers
-- slinkydeveloper
-
-reviewers:
-- productivity-writers
-- slinkydeveloper
-
-labels:
-- area/performance
diff --git a/test/performance/infra/aggregator/aggregator.go b/test/performance/infra/aggregator/aggregator.go
deleted file mode 100644
index 510b74a92cb..00000000000
--- a/test/performance/infra/aggregator/aggregator.go
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package aggregator
-
-import (
- "context"
- "fmt"
- "log"
- "net"
- "sort"
- "sync"
- "time"
-
- "github.com/google/mako/go/quickstore"
-
- "google.golang.org/grpc"
-
- "github.com/golang/protobuf/ptypes/timestamp"
-
- "knative.dev/pkg/ptr"
- "knative.dev/pkg/test/mako"
-
- tpb "github.com/google/mako/clients/proto/analyzers/threshold_analyzer_go_proto"
- mpb "github.com/google/mako/spec/proto/mako_go_proto"
-
- "knative.dev/eventing/test/performance/infra/common"
- pb "knative.dev/eventing/test/performance/infra/event_state"
-)
-
-const (
- maxRcvMsgSize = 1024 * 1024 * 1024
- publishFailureMessage = "Publish failure"
- deliverFailureMessage = "Delivery failure"
-)
-
-// thread-safe events recording map
-type eventsRecord struct {
- sync.RWMutex
- *pb.EventsRecord
-}
-
-var (
- fatalf = log.Fatalf
-
- pea = &tpb.ThresholdAnalyzerInput{
- Name: ptr.String("Publish error throughput"),
- Configs: []*tpb.ThresholdConfig{{
- Max: ptr.Float64(0),
- DataFilter: &mpb.DataFilter{
- DataType: mpb.DataFilter_METRIC_AGGREGATE_MAX.Enum(),
- ValueKey: ptr.String("pet"),
- },
- }},
- CrossRunConfig: mako.NewCrossRunConfig(10),
- }
- dea = &tpb.ThresholdAnalyzerInput{
- Name: ptr.String("Deliver error throughput"),
- Configs: []*tpb.ThresholdConfig{{
- Max: ptr.Float64(0),
- DataFilter: &mpb.DataFilter{
- DataType: mpb.DataFilter_METRIC_AGGREGATE_MAX.Enum(),
- ValueKey: ptr.String("det"),
- },
- }},
- CrossRunConfig: mako.NewCrossRunConfig(10),
- }
-)
-
-type Aggregator struct {
- // thread-safe events recording maps
- sentEvents *eventsRecord
- acceptedEvents *eventsRecord
- receivedEvents *eventsRecord
-
- // channel to notify the main goroutine that an events record has been received
- notifyEventsReceived chan struct{}
-
- // GRPC server
- listener net.Listener
- server *grpc.Server
-
- publishResults bool
- makoTags []string
- expectRecords uint
-}
-
-func NewAggregator(listenAddr string, expectRecords uint, makoTags []string, publishResults bool) (common.Executor, error) {
- l, err := net.Listen("tcp", listenAddr)
- if err != nil {
- return nil, fmt.Errorf("failed to create listener: %v", err)
- }
-
- executor := &Aggregator{
- listener: l,
- notifyEventsReceived: make(chan struct{}),
- makoTags: makoTags,
- expectRecords: expectRecords,
- publishResults: publishResults,
- }
-
- // --- Create GRPC server
- s := grpc.NewServer(grpc.MaxRecvMsgSize(maxRcvMsgSize))
- pb.RegisterEventsRecorderServer(s, executor)
- executor.server = s
-
- // --- Initialize records maps
- executor.sentEvents = &eventsRecord{EventsRecord: &pb.EventsRecord{
- Type: pb.EventsRecord_SENT,
- Events: make(map[string]*timestamp.Timestamp),
- }}
- executor.acceptedEvents = &eventsRecord{EventsRecord: &pb.EventsRecord{
- Type: pb.EventsRecord_ACCEPTED,
- Events: make(map[string]*timestamp.Timestamp),
- }}
- executor.receivedEvents = &eventsRecord{EventsRecord: &pb.EventsRecord{
- Type: pb.EventsRecord_RECEIVED,
- Events: make(map[string]*timestamp.Timestamp),
- }}
-
- return executor, nil
-}
-
-func (ag *Aggregator) Run(ctx context.Context) {
- var err error
- var client *mako.Client
- if ag.publishResults {
- log.Printf("Configuring Mako")
-
- makoClientCtx, cancel := context.WithTimeout(ctx, time.Minute*10)
- defer cancel()
-
- client, err = mako.Setup(makoClientCtx, ag.makoTags...)
- if err != nil {
- fatalf("Failed to setup mako: %v", err)
- }
-
- // Add Analyzers to detect performance regression.
- client.Quickstore.Input.ThresholdInputs = append(
- client.Quickstore.Input.ThresholdInputs,
- pea,
- dea)
-
- // Use a fresh context here so that our RPC to terminate the sidecar
- // isn't subject to our timeout (or we won't shut it down when we time out)
- defer client.ShutDownFunc(context.Background())
-
- // Wrap fatalf in a helper or our sidecar will live forever.
- fatalf = func(f string, args ...interface{}) {
- client.ShutDownFunc(context.Background())
- log.Fatalf(f, args...)
- }
-
- } else {
- log.Printf("Results won't be published to mako-stub")
- }
-
- // --- Run GRPC events receiver
- log.Printf("Starting events recorder server")
-
- go func() {
- if err := ag.server.Serve(ag.listener); err != nil {
- fatalf("Failed to serve: %v", err)
- }
- }()
- go func() {
- <-ctx.Done()
- log.Printf("Terminating events recorder server")
- ag.server.GracefulStop()
- }()
-
- // --- Wait for all records
- log.Printf("Expecting %d events records", ag.expectRecords)
- ag.waitForEvents()
- log.Printf("Received all expected events records")
-
- ag.server.GracefulStop()
-
- // --- Publish latencies
- log.Printf("Sent count: %d", len(ag.sentEvents.Events))
- log.Printf("Accepted count: %d", len(ag.acceptedEvents.Events))
- log.Printf("Received count: %d", len(ag.receivedEvents.Events))
-
- log.Printf("Calculating latencies")
-
- // count errors
- publishErrorTimestamps := make([]time.Time, 0)
- deliverErrorTimestamps := make([]time.Time, 0)
-
- for sentID := range ag.sentEvents.Events {
- timestampSentProto := ag.sentEvents.Events[sentID]
- timestampSent := timestampSentProto.AsTime()
-
- timestampAcceptedProto, accepted := ag.acceptedEvents.Events[sentID]
- timestampAccepted := timestampAcceptedProto.AsTime()
-
- timestampReceivedProto, received := ag.receivedEvents.Events[sentID]
- timestampReceived := timestampReceivedProto.AsTime()
-
- if !accepted {
- publishErrorTimestamps = append(publishErrorTimestamps, timestampSent)
- continue
- }
-
- if ag.publishResults {
- sendLatency := timestampAccepted.Sub(timestampSent)
- // Uncomment to get CSV directly from this container log
- // TODO add a flag to control whether we need this.
- // fmt.Printf("%f,%d,\n", mako.XTime(timestampSent), sendLatency.Nanoseconds())
- // TODO mako accepts float64, which imo could lead to losing some precision on local tests. It should accept int64
- if qerr := client.Quickstore.AddSamplePoint(mako.XTime(timestampSent), map[string]float64{"pl": sendLatency.Seconds()}); qerr != nil {
- log.Printf("ERROR AddSamplePoint for publish-latency: %v", qerr)
- }
- }
-
- if !received {
- deliverErrorTimestamps = append(deliverErrorTimestamps, timestampSent)
- continue
- }
-
- if ag.publishResults {
- e2eLatency := timestampReceived.Sub(timestampSent)
- // Uncomment to get CSV directly from this container log
- // TODO add a flag to control whether we need this.
- // fmt.Printf("%f,,%d\n", mako.XTime(timestampSent), e2eLatency.Nanoseconds())
- // TODO mako accepts float64, which imo could lead to losing some precision on local tests. It should accept int64
- if qerr := client.Quickstore.AddSamplePoint(mako.XTime(timestampSent), map[string]float64{"dl": e2eLatency.Seconds()}); qerr != nil {
- log.Printf("ERROR AddSamplePoint for deliver-latency: %v", qerr)
- }
- }
- }
-
- log.Printf("Publish failure count: %d", len(publishErrorTimestamps))
- log.Printf("Delivery failure count: %d", len(deliverErrorTimestamps))
-
- if ag.publishResults {
- log.Printf("Publishing errors")
-
- for _, t := range publishErrorTimestamps {
- if qerr := client.Quickstore.AddError(mako.XTime(t), publishFailureMessage); qerr != nil {
- log.Printf("ERROR AddError for publish-failure: %v", qerr)
- }
- }
-
- for _, t := range deliverErrorTimestamps {
- if qerr := client.Quickstore.AddError(mako.XTime(t), deliverFailureMessage); qerr != nil {
- log.Printf("ERROR AddSamplePoint for deliver-failure: %v", qerr)
- }
- }
-
- log.Printf("Publishing throughputs")
-
- sentTimestamps := eventsToTimestampsArray(&ag.sentEvents.Events)
- err = publishThpt(sentTimestamps, client.Quickstore, "st")
- if err != nil {
- log.Printf("ERROR AddSamplePoint for send-throughput: %v", err)
- }
-
- receivedTimestamps := eventsToTimestampsArray(&ag.receivedEvents.Events)
- err = publishThpt(receivedTimestamps, client.Quickstore, "dt")
- if err != nil {
- log.Printf("ERROR AddSamplePoint for deliver-throughput: %v", err)
- }
-
- err = publishThpt(publishErrorTimestamps, client.Quickstore, "pet")
- if err != nil {
- log.Printf("ERROR AddSamplePoint for publish-failure-throughput: %v", err)
- }
-
- err = publishThpt(deliverErrorTimestamps, client.Quickstore, "det")
- if err != nil {
- log.Printf("ERROR AddSamplePoint for deliver-failure-throughput: %v", err)
- }
-
- log.Printf("Publishing aggregates")
-
- client.Quickstore.AddRunAggregate("pe", float64(len(publishErrorTimestamps)))
- client.Quickstore.AddRunAggregate("de", float64(len(deliverErrorTimestamps)))
-
- log.Printf("Store to mako")
-
- if err := client.StoreAndHandleResult(); err != nil {
- fatalf("Failed to store data and handle the result: %v\n", err)
- }
- }
-
- log.Printf("Aggregation completed")
-}
-
-func eventsToTimestampsArray(events *map[string]*timestamp.Timestamp) []time.Time {
- values := make([]time.Time, 0, len(*events))
- for _, v := range *events {
- t := v.AsTime()
- values = append(values, t)
- }
- return values
-}
-
-func publishThpt(timestamps []time.Time, q *quickstore.Quickstore, metricName string) error {
- if len(timestamps) >= 2 {
- sort.Slice(timestamps, func(x, y int) bool { return timestamps[x].Before(timestamps[y]) })
- var i, thpt int
- for j, t := range timestamps[1:] {
- thpt++
- for i < j && t.Sub(timestamps[i]) > time.Second {
- i++
- thpt--
- }
- if qerr := q.AddSamplePoint(mako.XTime(t), map[string]float64{metricName: float64(thpt)}); qerr != nil {
- return qerr
- }
- }
- } else if len(timestamps) == 1 {
- if qerr := q.AddSamplePoint(mako.XTime(timestamps[0]), map[string]float64{metricName: 1}); qerr != nil {
- return qerr
- }
- } else {
- if qerr := q.AddSamplePoint(mako.XTime(time.Now()), map[string]float64{metricName: 0}); qerr != nil {
- return qerr
- }
- }
- return nil
-}
-
-// waitForEvents blocks until the expected number of events records has been received.
-func (ag *Aggregator) waitForEvents() {
- for receivedRecords := uint(0); receivedRecords < ag.expectRecords; receivedRecords++ {
- <-ag.notifyEventsReceived
- }
-}
-
-// RecordSentEvents implements event_state.EventsRecorder
-func (ag *Aggregator) RecordEvents(_ context.Context, in *pb.EventsRecordList) (*pb.RecordReply, error) {
- defer func() {
- ag.notifyEventsReceived <- struct{}{}
- }()
-
- for _, recIn := range in.Items {
- recType := recIn.GetType()
-
- var rec *eventsRecord
-
- switch recType {
- case pb.EventsRecord_SENT:
- rec = ag.sentEvents
- case pb.EventsRecord_ACCEPTED:
- rec = ag.acceptedEvents
- case pb.EventsRecord_RECEIVED:
- rec = ag.receivedEvents
- default:
- log.Printf("Ignoring events record of type %s", recType)
- continue
- }
-
- log.Printf("-> Recording %d %s events", uint64(len(recIn.Events)), recType)
-
- func() {
- rec.Lock()
- defer rec.Unlock()
- for id, t := range recIn.Events {
- if _, exists := rec.Events[id]; exists {
- log.Printf("!! Found duplicate %s event ID %s", recType, id)
- continue
- }
- rec.Events[id] = t
- }
- }()
- }
-
- return &pb.RecordReply{Count: uint32(len(in.Items))}, nil
-}
diff --git a/test/performance/infra/common/constants.go b/test/performance/infra/common/constants.go
deleted file mode 100644
index f1ac085e7ff..00000000000
--- a/test/performance/infra/common/constants.go
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package common
-
-import "time"
-
-const (
- WarmupEventType = "warmup.perf-test"
- MeasureEventType = "measure.perf-test"
- GCEventType = "gc.perf-test"
- EndEventType = "end.perf-test"
- CEReceiverPort = "8080"
- WaitForFlush = 5 * time.Second
- WaitForReceiverGC = 3 * time.Second
- WaitAfterWarmup = 5 * time.Second
-)
diff --git a/test/performance/infra/common/event_timestamp.go b/test/performance/infra/common/event_timestamp.go
deleted file mode 100644
index 79618494bdf..00000000000
--- a/test/performance/infra/common/event_timestamp.go
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package common
-
-import "github.com/golang/protobuf/ptypes/timestamp"
-
-// EventTimestamp is the recorded EventTimestamp of an event.
-type EventTimestamp struct {
- EventId string
- At *timestamp.Timestamp
-}
diff --git a/test/performance/infra/common/executor.go b/test/performance/infra/common/executor.go
deleted file mode 100644
index 1f0599b9722..00000000000
--- a/test/performance/infra/common/executor.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package common
-
-import (
- "context"
- "sync"
-)
-
-type Executor interface {
- Run(ctx context.Context)
-}
-
-type Executors []Executor
-
-func (e Executors) Run(ctx context.Context) {
- waitingExecutors := sync.WaitGroup{}
-
- for _, exec := range e {
- waitingExecutors.Add(1)
- go func(executor Executor) {
- defer waitingExecutors.Done()
- executor.Run(ctx)
- }(exec)
- }
-
- waitingExecutors.Wait()
-}
diff --git a/test/performance/infra/common/pace.go b/test/performance/infra/common/pace.go
deleted file mode 100644
index 3481c0929f4..00000000000
--- a/test/performance/infra/common/pace.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package common
-
-import (
- "fmt"
- "strconv"
- "strings"
- "time"
-)
-
-const defaultPaceDuration = 10 * time.Second
-
-type PaceSpec struct {
- Rps int
- Duration time.Duration
-}
-
-// We need those estimates to allocate memory before benchmark starts
-func CalculateMemoryConstraintsForPaceSpecs(paceSpecs []PaceSpec) (estimatedNumberOfMessagesInsideAChannel uint64, estimatedNumberOfTotalMessages uint64) {
- for _, pacer := range paceSpecs {
- totalMessages := uint64(pacer.Rps * int(pacer.Duration.Seconds()))
- // Add a bit more, just to be sure that we don't under allocate
- totalMessages = totalMessages + uint64(float64(totalMessages)*0.1)
- // Aggressively set the queue length so enqueue operation won't be blocked
- // as the total number of messages grows.
- queueLength := uint64(pacer.Rps * 5)
- estimatedNumberOfTotalMessages += totalMessages
- if queueLength > estimatedNumberOfMessagesInsideAChannel {
- estimatedNumberOfMessagesInsideAChannel = queueLength
- }
- }
- return
-}
-
-func ParsePaceSpec(pace string) ([]PaceSpec, error) {
- paceSpecArray := strings.Split(pace, ",")
- pacerSpecs := make([]PaceSpec, 0)
-
- for _, p := range paceSpecArray {
- ps := strings.Split(p, ":")
- rps, err := strconv.Atoi(ps[0])
- if err != nil {
- return nil, fmt.Errorf("invalid format %q: %v", ps, err)
- }
- duration := defaultPaceDuration
-
- if len(ps) == 2 {
- durationSec, err := strconv.Atoi(ps[1])
- if err != nil {
- return nil, fmt.Errorf("invalid format %q: %v", ps, err)
- }
- duration = time.Second * time.Duration(durationSec)
- }
-
- pacerSpecs = append(pacerSpecs, PaceSpec{rps, duration})
- }
-
- return pacerSpecs, nil
-}
diff --git a/test/performance/infra/event_state/client.go b/test/performance/infra/event_state/client.go
deleted file mode 100644
index 96b9c232313..00000000000
--- a/test/performance/infra/event_state/client.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package event_state
-
-import (
- "context"
- "fmt"
- "time"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
-)
-
-const publishTimeout = 1 * time.Minute
-
-type AggregatorClient struct {
- conn *grpc.ClientConn
- aggCli EventsRecorderClient
-}
-
-func NewAggregatorClient(aggregAddr string) (*AggregatorClient, error) {
- // create a connection to the aggregator
- conn, err := grpc.Dial(aggregAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
- if err != nil {
- return nil, fmt.Errorf("Failed to connect to the aggregator: %v", err)
- }
-
- aggCli := NewEventsRecorderClient(conn)
-
- return &AggregatorClient{conn, aggCli}, nil
-}
-
-func (ac *AggregatorClient) Publish(rl *EventsRecordList) error {
- return ac.publishWithTimeout(publishTimeout, rl)
-}
-
-func (ac *AggregatorClient) publishWithTimeout(timeout time.Duration, rl *EventsRecordList) error {
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- defer cancel()
- _, err := ac.aggCli.RecordEvents(ctx, rl)
- return err
-}
-
-func (ac *AggregatorClient) Close() {
- _ = ac.conn.Close()
-}
diff --git a/test/performance/infra/event_state/event_state.pb.go b/test/performance/infra/event_state/event_state.pb.go
deleted file mode 100644
index 175c1ef241b..00000000000
--- a/test/performance/infra/event_state/event_state.pb.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: event_state.proto
-
-package event_state
-
-import (
- context "context"
- fmt "fmt"
- math "math"
-
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type EventsRecord_Type int32
-
-const (
- EventsRecord_UNKNOWN EventsRecord_Type = 0
- EventsRecord_SENT EventsRecord_Type = 1
- EventsRecord_ACCEPTED EventsRecord_Type = 2
- EventsRecord_RECEIVED EventsRecord_Type = 3
-)
-
-var EventsRecord_Type_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "SENT",
- 2: "ACCEPTED",
- 3: "RECEIVED",
-}
-
-var EventsRecord_Type_value = map[string]int32{
- "UNKNOWN": 0,
- "SENT": 1,
- "ACCEPTED": 2,
- "RECEIVED": 3,
-}
-
-func (x EventsRecord_Type) String() string {
- return proto.EnumName(EventsRecord_Type_name, int32(x))
-}
-
-func (EventsRecord_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_de3fba9d879b76ae, []int{0, 0}
-}
-
-type EventsRecord struct {
- Events map[string]*timestamp.Timestamp `protobuf:"bytes,1,rep,name=Events,proto3" json:"Events,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- Type EventsRecord_Type `protobuf:"varint,2,opt,name=type,proto3,enum=event_state.EventsRecord_Type" json:"type,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EventsRecord) Reset() { *m = EventsRecord{} }
-func (m *EventsRecord) String() string { return proto.CompactTextString(m) }
-func (*EventsRecord) ProtoMessage() {}
-func (*EventsRecord) Descriptor() ([]byte, []int) {
- return fileDescriptor_de3fba9d879b76ae, []int{0}
-}
-
-func (m *EventsRecord) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EventsRecord.Unmarshal(m, b)
-}
-func (m *EventsRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EventsRecord.Marshal(b, m, deterministic)
-}
-func (m *EventsRecord) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventsRecord.Merge(m, src)
-}
-func (m *EventsRecord) XXX_Size() int {
- return xxx_messageInfo_EventsRecord.Size(m)
-}
-func (m *EventsRecord) XXX_DiscardUnknown() {
- xxx_messageInfo_EventsRecord.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventsRecord proto.InternalMessageInfo
-
-func (m *EventsRecord) GetEvents() map[string]*timestamp.Timestamp {
- if m != nil {
- return m.Events
- }
- return nil
-}
-
-func (m *EventsRecord) GetType() EventsRecord_Type {
- if m != nil {
- return m.Type
- }
- return EventsRecord_UNKNOWN
-}
-
-type EventsRecordList struct {
- Items []*EventsRecord `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EventsRecordList) Reset() { *m = EventsRecordList{} }
-func (m *EventsRecordList) String() string { return proto.CompactTextString(m) }
-func (*EventsRecordList) ProtoMessage() {}
-func (*EventsRecordList) Descriptor() ([]byte, []int) {
- return fileDescriptor_de3fba9d879b76ae, []int{1}
-}
-
-func (m *EventsRecordList) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EventsRecordList.Unmarshal(m, b)
-}
-func (m *EventsRecordList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EventsRecordList.Marshal(b, m, deterministic)
-}
-func (m *EventsRecordList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventsRecordList.Merge(m, src)
-}
-func (m *EventsRecordList) XXX_Size() int {
- return xxx_messageInfo_EventsRecordList.Size(m)
-}
-func (m *EventsRecordList) XXX_DiscardUnknown() {
- xxx_messageInfo_EventsRecordList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventsRecordList proto.InternalMessageInfo
-
-func (m *EventsRecordList) GetItems() []*EventsRecord {
- if m != nil {
- return m.Items
- }
- return nil
-}
-
-type RecordReply struct {
- Count uint32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RecordReply) Reset() { *m = RecordReply{} }
-func (m *RecordReply) String() string { return proto.CompactTextString(m) }
-func (*RecordReply) ProtoMessage() {}
-func (*RecordReply) Descriptor() ([]byte, []int) {
- return fileDescriptor_de3fba9d879b76ae, []int{2}
-}
-
-func (m *RecordReply) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RecordReply.Unmarshal(m, b)
-}
-func (m *RecordReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RecordReply.Marshal(b, m, deterministic)
-}
-func (m *RecordReply) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RecordReply.Merge(m, src)
-}
-func (m *RecordReply) XXX_Size() int {
- return xxx_messageInfo_RecordReply.Size(m)
-}
-func (m *RecordReply) XXX_DiscardUnknown() {
- xxx_messageInfo_RecordReply.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RecordReply proto.InternalMessageInfo
-
-func (m *RecordReply) GetCount() uint32 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-func init() {
- proto.RegisterEnum("event_state.EventsRecord_Type", EventsRecord_Type_name, EventsRecord_Type_value)
- proto.RegisterType((*EventsRecord)(nil), "event_state.EventsRecord")
- proto.RegisterMapType((map[string]*timestamp.Timestamp)(nil), "event_state.EventsRecord.EventsEntry")
- proto.RegisterType((*EventsRecordList)(nil), "event_state.EventsRecordList")
- proto.RegisterType((*RecordReply)(nil), "event_state.RecordReply")
-}
-
-func init() { proto.RegisterFile("event_state.proto", fileDescriptor_de3fba9d879b76ae) }
-
-var fileDescriptor_de3fba9d879b76ae = []byte{
- // 328 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x50, 0x4f, 0x4b, 0xfb, 0x40,
- 0x10, 0x6d, 0xd2, 0x3f, 0xbf, 0xfe, 0x66, 0x6b, 0x59, 0x17, 0x0f, 0x31, 0xa0, 0x96, 0x88, 0xd0,
- 0x53, 0x2a, 0xf1, 0xa2, 0x82, 0x07, 0x49, 0xf7, 0x50, 0x94, 0x28, 0x6b, 0xaa, 0x07, 0x0f, 0xd2,
- 0xd6, 0xb1, 0x14, 0xdb, 0x6e, 0x48, 0xb6, 0x85, 0x7c, 0x0e, 0xbf, 0xb0, 0x24, 0x9b, 0xc2, 0x7a,
- 0xe8, 0x6d, 0xde, 0xcc, 0x7b, 0xf3, 0xe6, 0x0d, 0x1c, 0xe2, 0x16, 0xd7, 0xea, 0x23, 0x53, 0x13,
- 0x85, 0x7e, 0x92, 0x4a, 0x25, 0x19, 0x31, 0x5a, 0xee, 0xd9, 0x5c, 0xca, 0xf9, 0x12, 0x07, 0xe5,
- 0x68, 0xba, 0xf9, 0x1a, 0xa8, 0xc5, 0x0a, 0x33, 0x35, 0x59, 0x25, 0x9a, 0xed, 0xfd, 0xd8, 0xd0,
- 0xe1, 0x85, 0x20, 0x13, 0x38, 0x93, 0xe9, 0x27, 0xbb, 0x83, 0x96, 0xc6, 0x8e, 0xd5, 0xab, 0xf7,
- 0x49, 0x70, 0xe1, 0x9b, 0x16, 0x26, 0xb5, 0x02, 0x7c, 0xad, 0xd2, 0x5c, 0x54, 0x22, 0x16, 0x40,
- 0x43, 0xe5, 0x09, 0x3a, 0x76, 0xcf, 0xea, 0x77, 0x83, 0xd3, 0xfd, 0xe2, 0x38, 0x4f, 0x50, 0x94,
- 0x5c, 0x77, 0x0c, 0xc4, 0x58, 0xc5, 0x28, 0xd4, 0xbf, 0x31, 0x77, 0xac, 0x9e, 0xd5, 0xff, 0x2f,
- 0x8a, 0x92, 0x5d, 0x42, 0x73, 0x3b, 0x59, 0x6e, 0xf4, 0x56, 0x12, 0xb8, 0xbe, 0x4e, 0xe5, 0xef,
- 0x52, 0xf9, 0xf1, 0x2e, 0x95, 0xd0, 0xc4, 0x5b, 0xfb, 0xda, 0xf2, 0x6e, 0xa0, 0x51, 0x98, 0x30,
- 0x02, 0xff, 0xc6, 0xd1, 0x43, 0xf4, 0xf4, 0x16, 0xd1, 0x1a, 0x6b, 0x43, 0xe3, 0x85, 0x47, 0x31,
- 0xb5, 0x58, 0x07, 0xda, 0xf7, 0x61, 0xc8, 0x9f, 0x63, 0x3e, 0xa4, 0x76, 0x81, 0x04, 0x0f, 0xf9,
- 0xe8, 0x95, 0x0f, 0x69, 0xdd, 0x0b, 0x81, 0x9a, 0xc7, 0x3e, 0x2e, 0x32, 0xc5, 0x06, 0xd0, 0x5c,
- 0x28, 0x5c, 0xed, 0xfe, 0x72, 0xbc, 0x37, 0x9a, 0xd0, 0x3c, 0xef, 0x1c, 0x48, 0xd5, 0xc0, 0x64,
- 0x99, 0xb3, 0x23, 0x68, 0xce, 0xe4, 0x66, 0xad, 0xca, 0x60, 0x07, 0x42, 0x83, 0xe0, 0x1d, 0xba,
- 0xa6, 0x16, 0x53, 0x36, 0x82, 0x8e, 0xae, 0xab, 0x8f, 0x9e, 0xec, 0x35, 0x2a, 0xce, 0x72, 0x9d,
- 0x3f, 0x63, 0xc3, 0xd0, 0xab, 0x4d, 0x5b, 0xe5, 0x83, 0xae, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff,
- 0x1f, 0xfa, 0x30, 0x3b, 0x26, 0x02, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// EventsRecorderClient is the client API for EventsRecorder service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type EventsRecorderClient interface {
- RecordEvents(ctx context.Context, in *EventsRecordList, opts ...grpc.CallOption) (*RecordReply, error)
-}
-
-type eventsRecorderClient struct {
- cc *grpc.ClientConn
-}
-
-func NewEventsRecorderClient(cc *grpc.ClientConn) EventsRecorderClient {
- return &eventsRecorderClient{cc}
-}
-
-func (c *eventsRecorderClient) RecordEvents(ctx context.Context, in *EventsRecordList, opts ...grpc.CallOption) (*RecordReply, error) {
- out := new(RecordReply)
- err := c.cc.Invoke(ctx, "/event_state.EventsRecorder/RecordEvents", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// EventsRecorderServer is the server API for EventsRecorder service.
-type EventsRecorderServer interface {
- RecordEvents(context.Context, *EventsRecordList) (*RecordReply, error)
-}
-
-// UnimplementedEventsRecorderServer can be embedded to have forward compatible implementations.
-type UnimplementedEventsRecorderServer struct {
-}
-
-func (*UnimplementedEventsRecorderServer) RecordEvents(ctx context.Context, req *EventsRecordList) (*RecordReply, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RecordEvents not implemented")
-}
-
-func RegisterEventsRecorderServer(s *grpc.Server, srv EventsRecorderServer) {
- s.RegisterService(&_EventsRecorder_serviceDesc, srv)
-}
-
-func _EventsRecorder_RecordEvents_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(EventsRecordList)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(EventsRecorderServer).RecordEvents(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/event_state.EventsRecorder/RecordEvents",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(EventsRecorderServer).RecordEvents(ctx, req.(*EventsRecordList))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _EventsRecorder_serviceDesc = grpc.ServiceDesc{
- ServiceName: "event_state.EventsRecorder",
- HandlerType: (*EventsRecorderServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "RecordEvents",
- Handler: _EventsRecorder_RecordEvents_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "event_state.proto",
-}
diff --git a/test/performance/infra/event_state/event_state.proto b/test/performance/infra/event_state/event_state.proto
deleted file mode 100644
index c649e08576c..00000000000
--- a/test/performance/infra/event_state/event_state.proto
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2019 The Knative Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-package event_state;
-
-import "google/protobuf/timestamp.proto";
-
-message EventsRecord {
- map Events = 1;
-
- enum Type {
- UNKNOWN = 0;
- SENT = 1;
- ACCEPTED = 2;
- RECEIVED = 3;
- }
- Type type = 2;
-}
-
-message EventsRecordList {
- repeated EventsRecord items = 1;
-}
-
-service EventsRecorder{
- rpc RecordEvents(EventsRecordList) returns (RecordReply) {}
-}
-
-message RecordReply {
- uint32 count = 1;
-}
diff --git a/test/performance/infra/image_helpers.go b/test/performance/infra/image_helpers.go
deleted file mode 100644
index b3bc53060a4..00000000000
--- a/test/performance/infra/image_helpers.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package infra
-
-import (
- "context"
- "flag"
- "fmt"
- "log"
- "os"
- "strings"
-
- "k8s.io/client-go/kubernetes"
- "knative.dev/pkg/signals"
- pkgtest "knative.dev/pkg/test"
-
- "knative.dev/eventing/test/performance/infra/aggregator"
- "knative.dev/eventing/test/performance/infra/common"
- "knative.dev/eventing/test/performance/infra/receiver"
- "knative.dev/eventing/test/performance/infra/sender"
-)
-
-//go:generate protoc -I ./event_state --go_out=plugins=grpc:./event_state ./event_state/event_state.proto
-
-var (
- roles string
-
- // role=sender
- aggregAddr string
- msgSize uint
- paceFlag string
- warmupSeconds uint
- fixedBody bool
-
- // role=aggregator
- expectRecords uint
- listenAddr string
- makoTags string
- publish bool
-)
-
-const (
- defaultTestNamespace = "default"
- podNamespaceEnvVar = "POD_NAMESPACE"
-)
-
-func DeclareFlags() {
- flag.StringVar(&roles, "roles", "", `Role of this instance. One or multiple (comma-separated) of ("sender", "receiver", "aggregator")`)
-
- // receiver & sender flags
- flag.StringVar(&paceFlag, "pace", "", "Pace array comma separated. Format rps[:duration=10s]. Example 100,200:4,100:1,500:60")
-
- // sender flags
- flag.StringVar(&aggregAddr, "aggregator", "", "The aggregator address for sending events records.")
- flag.UintVar(&msgSize, "msg-size", 100, "The size in bytes of each message we want to send. Generate random strings to avoid caching.")
- flag.UintVar(&warmupSeconds, "warmup", 10, "Duration in seconds of warmup phase. During warmup latencies are not recorded. 0 means no warmup")
- flag.BoolVar(&fixedBody, "generate-payload-on-each-request", true, "Produce unique body contents for each call")
-
- // aggregator flags
- flag.StringVar(&listenAddr, "listen-address", ":10000", "Network address the aggregator listens on.")
- flag.UintVar(&expectRecords, "expect-records", 2, "Number of expected events records before aggregating data.")
- flag.StringVar(&makoTags, "mako-tags", "", "Comma separated list of benchmark specific Mako tags.")
- flag.BoolVar(&publish, "publish", true, "Publish the results to mako-stub (default true)")
-}
-
-func StartPerformanceImage(factory sender.LoadGeneratorFactory, typeExtractor receiver.TypeExtractor, idExtractor receiver.IdExtractor) {
- // We want this for properly handling Kubernetes container lifecycle events.
- ctx := signals.NewContext()
-
- if roles == "" {
- panic("--roles not set!")
- }
-
- var execs []common.Executor
-
- if strings.Contains(roles, "receiver") {
- if paceFlag == "" {
- panic("--pace not set!")
- }
- if aggregAddr == "" {
- panic("--aggregator not set!")
- }
-
- log.Println("Creating a receiver")
-
- receiver, err := receiver.NewReceiver(paceFlag, aggregAddr, warmupSeconds, typeExtractor, idExtractor)
- if err != nil {
- panic(err)
- }
-
- execs = append(execs, receiver)
- }
-
- if strings.Contains(roles, "sender") {
- if paceFlag == "" {
- panic("--pace not set!")
- }
- if aggregAddr == "" {
- panic("--aggregator not set!")
- }
-
- log.Println("Creating a sender")
-
- sender, err := sender.NewSender(factory, aggregAddr, msgSize, warmupSeconds, paceFlag, fixedBody)
- if err != nil {
- panic(err)
- }
-
- execs = append(execs, sender)
- }
-
- if strings.Contains(roles, "aggregator") {
- log.Println("Creating an aggregator")
-
- aggr, err := aggregator.NewAggregator(listenAddr, expectRecords, strings.Split(makoTags, ","), publish)
- if err != nil {
- panic(err)
- }
-
- execs = append(execs, aggr)
- }
-
- // wait until all pods are ready
- ns := testNamespace()
- log.Printf("Waiting for all Pods to be ready in namespace %s", ns)
- if err := waitForPods(ns); err != nil {
- panic(fmt.Errorf("timeout waiting for Pods readiness in namespace %s: %v", ns, err))
- }
-
- log.Printf("Starting %d executors", len(execs))
-
- common.Executors(execs).Run(ctx)
-
- log.Println("Performance image completed")
-}
-
-func testNamespace() string {
- if pn := os.Getenv(podNamespaceEnvVar); pn != "" {
- return pn
- }
- return defaultTestNamespace
-}
-
-func waitForPods(namespace string) error {
- cfg, err := pkgtest.BuildClientConfig("", "")
- if err != nil {
- return err
- }
-
- kClient, err := kubernetes.NewForConfig(cfg)
- if err != nil {
- return err
- }
-
- return pkgtest.WaitForAllPodsRunning(context.Background(), kClient, namespace)
-}
diff --git a/test/performance/infra/receiver/id_extractor.go b/test/performance/infra/receiver/id_extractor.go
deleted file mode 100644
index 0337b9840eb..00000000000
--- a/test/performance/infra/receiver/id_extractor.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package receiver
-
-import cloudevents "github.com/cloudevents/sdk-go/v2"
-
-// IdExtractor is used by the receiver to establish the id of the received event
-type IdExtractor func(event cloudevents.Event) string
-
-// EventIdExtractor uses
-//
-// event.ID()
-//
-// to extract the event id
-func EventIdExtractor(event cloudevents.Event) string {
- return event.ID()
-}
diff --git a/test/performance/infra/receiver/receiver.go b/test/performance/infra/receiver/receiver.go
deleted file mode 100644
index b251335e8ce..00000000000
--- a/test/performance/infra/receiver/receiver.go
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package receiver
-
-import (
- "context"
- "fmt"
- "log"
- "net"
- "runtime"
- "time"
-
- cloudevents "github.com/cloudevents/sdk-go/v2"
- "github.com/golang/protobuf/ptypes/timestamp"
- "google.golang.org/protobuf/types/known/timestamppb"
-
- "knative.dev/eventing/test/performance/infra/common"
- pb "knative.dev/eventing/test/performance/infra/event_state"
-)
-
-const shutdownWaitTime = time.Second * 5
-
-// Receiver records the received events and sends to the aggregator.
-// Since sender implementations can put id and type of event inside the event payload,
-// then the Receiver uses IdExtractor and TypeExtractor to extract them
-type Receiver struct {
- typeExtractor TypeExtractor
- idExtractor IdExtractor
- timeout time.Duration
-
- receivedCh chan common.EventTimestamp
- endCh chan struct{}
- receivedEvents *pb.EventsRecord
-
- // aggregator GRPC client
- aggregatorClient *pb.AggregatorClient
-}
-
-func NewReceiver(paceFlag string, aggregAddr string, warmupSeconds uint, typeExtractor TypeExtractor, idExtractor IdExtractor) (common.Executor, error) {
- pace, err := common.ParsePaceSpec(paceFlag)
- if err != nil {
- return nil, err
- }
-
- // create a connection to the aggregator
- aggregatorClient, err := pb.NewAggregatorClient(aggregAddr)
- if err != nil {
- return nil, err
- }
-
- channelSize, totalMessages := common.CalculateMemoryConstraintsForPaceSpecs(pace)
-
- // Calculate timeout for receiver
- var timeout time.Duration
- timeout = time.Second * time.Duration(warmupSeconds)
- if timeout != 0 {
- timeout += common.WaitAfterWarmup
- }
- for _, p := range pace {
- timeout += p.Duration + common.WaitForFlush + common.WaitForReceiverGC
- }
- // The timeout is doubled because the sender is slowed down by the SUT when the load is too high and test requires more than needed.
- // Coefficient of 2 is based on experimental evidence.
- // More: https://github.com/knative/eventing/pull/2195#discussion_r348368914
- timeout *= 2
-
- return &Receiver{
- typeExtractor: typeExtractor,
- idExtractor: idExtractor,
- timeout: timeout,
- receivedCh: make(chan common.EventTimestamp, channelSize),
- endCh: make(chan struct{}, 1),
- receivedEvents: &pb.EventsRecord{
- Type: pb.EventsRecord_RECEIVED,
- Events: make(map[string]*timestamp.Timestamp, totalMessages),
- },
- aggregatorClient: aggregatorClient,
- }, nil
-}
-
-func (r *Receiver) Run(ctx context.Context) {
- // Wait the port before starting the ce receiver
- waitForPortAvailable(common.CEReceiverPort)
-
- receiverCtx, closeReceiver := context.WithCancel(ctx)
-
- go func() {
- if err := r.startCloudEventsReceiver(receiverCtx); err != nil {
- log.Fatalf("Failed to start CloudEvents receiver: %v", err)
- }
- }()
-
- // When the testing service is degraded, there is a chance that the end message is not received
- // This timer sends to endCh a signal to stop processing events and start tear down of receiver
- timeoutTimer := time.AfterFunc(r.timeout, func() {
- log.Printf("Receiver timeout")
- r.endCh <- struct{}{}
- })
- log.Printf("Started receiver timeout timer of duration %v", r.timeout)
-
- r.processEvents()
-
- // Stop the timeoutTimer in case the tear down was triggered by end message
- timeoutTimer.Stop()
-
- closeReceiver()
-
- log.Println("Receiver closed")
-
- log.Printf("%-15s: %d", "Received count", len(r.receivedEvents.Events))
-
- if err := r.aggregatorClient.Publish(&pb.EventsRecordList{Items: []*pb.EventsRecord{
- r.receivedEvents,
- }}); err != nil {
- log.Fatalf("Failed to send events record: %v\n", err)
- }
-
- close(r.receivedCh)
-}
-
-func (r *Receiver) processEvents() {
- for {
- select {
- case e, ok := <-r.receivedCh:
- if !ok {
- return
- }
- r.receivedEvents.Events[e.EventId] = e.At
- case <-r.endCh:
- return
- }
- }
-}
-
-func (r *Receiver) startCloudEventsReceiver(ctx context.Context) error {
- cli, err := cloudevents.NewClientHTTP()
- if err != nil {
- return fmt.Errorf("failed to create CloudEvents client: %v", err)
- }
-
- log.Printf("CloudEvents receiver started")
- return cli.StartReceiver(ctx, r.processReceiveEvent)
-}
-
-// processReceiveEvent processes the event received by the CloudEvents receiver.
-func (r *Receiver) processReceiveEvent(event cloudevents.Event) {
- t := r.typeExtractor(event)
- switch t {
- case common.MeasureEventType:
- r.receivedCh <- common.EventTimestamp{EventId: r.idExtractor(event), At: timestamppb.Now()}
- case common.GCEventType:
- runtime.GC()
- case common.EndEventType:
- log.Printf("End message received correctly")
- // Wait a bit so all messages on wire are processed
- time.AfterFunc(shutdownWaitTime, func() {
- r.endCh <- struct{}{}
- })
- }
-}
-
-// waitForPortAvailable waits until the given TCP port is available.
-func waitForPortAvailable(port string) {
- var free bool
- for i := 0; i < 30; i++ {
- conn, err := net.Dial("tcp", ":"+port)
- if _, ok := err.(*net.OpError); ok {
- free = true
- break
- }
- _ = conn.Close()
- time.Sleep(10 * time.Millisecond)
- }
- if !free {
- log.Fatalf("Timeout waiting for TCP port %s to become available\n", port)
- }
-}
diff --git a/test/performance/infra/receiver/type_extractor.go b/test/performance/infra/receiver/type_extractor.go
deleted file mode 100644
index c7a7639af9e..00000000000
--- a/test/performance/infra/receiver/type_extractor.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package receiver
-
-import cloudevents "github.com/cloudevents/sdk-go/v2"
-
-// TypeExtractor is used by the receiver to establish the type of the received event
-type TypeExtractor func(event cloudevents.Event) string
-
-// EventTypeExtractor uses
-//
-// event.Type()
-//
-// to extract the event type
-func EventTypeExtractor(event cloudevents.Event) string {
- return event.Type()
-}
diff --git a/test/performance/infra/sender/http_load_generator.go b/test/performance/infra/sender/http_load_generator.go
deleted file mode 100644
index cc63a1b9612..00000000000
--- a/test/performance/infra/sender/http_load_generator.go
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package sender
-
-import (
- "context"
- "fmt"
- "math/rand"
- "net"
- "net/http"
- "time"
-
- cloudevents "github.com/cloudevents/sdk-go/v2"
- "github.com/google/uuid"
- "github.com/rogpeppe/fastuuid"
- vegeta "github.com/tsenart/vegeta/v12/lib"
- "google.golang.org/protobuf/types/known/timestamppb"
-
- "knative.dev/eventing/test/performance/infra/common"
-)
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-}
-
-type CloudEventsTargeter struct {
- sinkUrl string
- msgSize uint
- eventType string
- eventSource string
- body []byte
- fixedBody bool
-}
-
-var letterBytes = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
-
-const markLetter = byte('"')
-
-// generateRandString returns a random string with the given length.
-func generateRandStringPayload(length uint) []byte {
- b := make([]byte, length)
- b[0] = markLetter
- for i := uint(1); i < length-1; i++ {
- b[i] = letterBytes[rand.Intn(len(letterBytes))]
- }
- b[length-1] = markLetter
- return b
-}
-
-func NewCloudEventsTargeter(sinkUrl string, msgSize uint, eventType string, eventSource string, fixedBody bool) CloudEventsTargeter {
- var body []byte
-
- if fixedBody {
- body = generateRandStringPayload(msgSize)
- }
- return CloudEventsTargeter{
- sinkUrl: sinkUrl,
- msgSize: msgSize,
- eventType: eventType,
- eventSource: eventSource,
- fixedBody: fixedBody,
- body: body,
- }
-}
-
-func (cet CloudEventsTargeter) VegetaTargeter() vegeta.Targeter {
- uuidGen := fastuuid.MustNewGenerator()
-
- ceType := []string{cet.eventType}
- ceSource := []string{cet.eventSource}
- ceSpecVersion := []string{cloudevents.VersionV1}
- ceContentType := []string{cloudevents.ApplicationJSON}
-
- return func(t *vegeta.Target) error {
- t.Method = http.MethodPost
- t.URL = cet.sinkUrl
-
- t.Header = make(http.Header, 5)
-
- t.Header["Ce-Id"] = []string{uuidGen.Hex128()}
-
- t.Header["Ce-Type"] = ceType
- t.Header["Ce-Source"] = ceSource
- t.Header["Ce-Specversion"] = ceSpecVersion
- t.Header["Content-Type"] = ceContentType
- if cet.fixedBody {
- t.Body = cet.body
- } else {
- t.Body = generateRandStringPayload(cet.msgSize)
- }
-
- return nil
- }
-}
-
-type httpLoadGenerator struct {
- eventSource string
- sinkUrl string
-
- sentCh chan common.EventTimestamp
- acceptedCh chan common.EventTimestamp
-
- warmupAttacker *vegeta.Attacker
- paceAttacker *vegeta.Attacker
- ceClient cloudevents.Client
-}
-
-func NewHTTPLoadGeneratorFactory(sinkUrl string, minWorkers uint64) LoadGeneratorFactory {
- return func(eventSource string, sentCh chan common.EventTimestamp, acceptedCh chan common.EventTimestamp) (generator LoadGenerator, e error) {
- if sinkUrl == "" {
- panic("Missing --sink flag")
- }
-
- loadGen := &httpLoadGenerator{
- eventSource: eventSource,
- sinkUrl: sinkUrl,
-
- sentCh: sentCh,
- acceptedCh: acceptedCh,
- }
-
- loadGen.warmupAttacker = vegeta.NewAttacker(vegeta.Workers(minWorkers))
- loadGen.paceAttacker = vegeta.NewAttacker(
- vegeta.Client(&http.Client{
- Timeout: vegeta.DefaultTimeout,
- Transport: requestInterceptor{
- before: func(request *http.Request) {
- id := request.Header.Get("Ce-Id")
- loadGen.sentCh <- common.EventTimestamp{EventId: id, At: timestamppb.Now()}
- },
- transport: vegetaAttackerTransport(),
- after: func(request *http.Request, response *http.Response, e error) {
- id := request.Header.Get("Ce-Id")
- t := timestamppb.Now()
- if e == nil && response.StatusCode >= http.StatusOK && response.StatusCode < http.StatusMultipleChoices {
- loadGen.acceptedCh <- common.EventTimestamp{EventId: id, At: t}
- }
- },
- },
- }),
- vegeta.Workers(minWorkers),
- vegeta.MaxBody(0),
- )
-
- var err error
- loadGen.ceClient, err = newCloudEventsClient(sinkUrl)
- if err != nil {
- return nil, err
- }
-
- return loadGen, nil
- }
-}
-
-// Since we need to add an interceptor to keep track of timestamps before and after sending events,
-// we need to have our own Transport implementation.
-// At the same time we still need to use the one implemented in Vegeta, which is optimized to being able to generate
-// high loads. But since the function is not exported, we need to add it here in order to use it.
-// The below function is mostly copied from https://github.com/tsenart/vegeta/blob/44a49c878dd6f28f04b9b5ce5751490b0dce1e18/lib/attack.go#L80
-func vegetaAttackerTransport() *http.Transport {
- dialer := &net.Dialer{
- LocalAddr: &net.TCPAddr{IP: vegeta.DefaultLocalAddr.IP, Zone: vegeta.DefaultLocalAddr.Zone},
- KeepAlive: 30 * time.Second,
- }
-
- return &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: dialer.Dial,
- TLSClientConfig: vegeta.DefaultTLSConfig,
- MaxIdleConnsPerHost: vegeta.DefaultConnections,
- }
-}
-
-func newCloudEventsClient(sinkUrl string) (cloudevents.Client, error) {
- t, err := cloudevents.NewHTTP(
- cloudevents.WithTarget(sinkUrl),
- )
- if err != nil {
- return nil, fmt.Errorf("failed to create transport: %w", err)
- }
-
- return cloudevents.NewClient(t)
-}
-
-func (h httpLoadGenerator) Warmup(pace common.PaceSpec, msgSize uint, fixedBody bool) {
- targeter := NewCloudEventsTargeter(h.sinkUrl, msgSize, common.WarmupEventType, defaultEventSource, fixedBody).VegetaTargeter()
- vegetaResults := h.warmupAttacker.Attack(targeter, vegeta.ConstantPacer{Freq: pace.Rps, Per: time.Second}, pace.Duration, common.WarmupEventType+"-attack")
- for range vegetaResults {
- }
-}
-
-func (h httpLoadGenerator) RunPace(i int, pace common.PaceSpec, msgSize uint, fixedBody bool) {
- targeter := NewCloudEventsTargeter(h.sinkUrl, msgSize, common.MeasureEventType, eventsSource(), fixedBody).VegetaTargeter()
- res := h.paceAttacker.Attack(targeter, vegeta.ConstantPacer{Freq: pace.Rps, Per: time.Second}, pace.Duration, fmt.Sprintf("%s-attack-%d", h.eventSource, i))
- for range res {
- }
-}
-
-func (h httpLoadGenerator) SendGCEvent() {
- event := cloudevents.NewEvent(cloudevents.VersionV1)
- event.SetID(uuid.New().String())
- event.SetDataContentType(cloudevents.ApplicationJSON)
- event.SetType(common.GCEventType)
- event.SetSource(h.eventSource)
-
- _ = h.ceClient.Send(context.TODO(), event)
-}
-
-func (h httpLoadGenerator) SendEndEvent() {
- event := cloudevents.NewEvent(cloudevents.VersionV1)
- event.SetID(uuid.New().String())
- event.SetDataContentType(cloudevents.ApplicationJSON)
- event.SetType(common.EndEventType)
- event.SetSource(h.eventSource)
-
- _ = h.ceClient.Send(context.TODO(), event)
-}
diff --git a/test/performance/infra/sender/http_load_generator_test.go b/test/performance/infra/sender/http_load_generator_test.go
deleted file mode 100644
index 17761267626..00000000000
--- a/test/performance/infra/sender/http_load_generator_test.go
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package sender
-
-import (
- "bytes"
- "testing"
-
- vegeta "github.com/tsenart/vegeta/v12/lib"
-)
-
-func TestGenerateRandStringPayload(t *testing.T) {
- const sizeRandomPayload = 10
-
- generated := generateRandStringPayload(sizeRandomPayload)
-
- if len(generated) != sizeRandomPayload {
- t.Errorf("len(generateRandStringPayload(sizeRandomPayload)) = %v, want %v", len(generated), sizeRandomPayload)
- }
-
- if generated[0] != markLetter {
- t.Errorf("generateRandStringPayload(sizeRandomPayload)[0] = %v, want %v", generated[0], markLetter)
- }
-
- if generated[sizeRandomPayload-1] != markLetter {
- t.Errorf("generateRandStringPayload(sizeRandomPayload)[sizeRandomPayload - 1] = %v, want %v", generated[sizeRandomPayload-1], markLetter)
- }
-}
-
-func TestVegetaTargeter(t *testing.T) {
- const sizeRandomPayload = 100
-
- for _, fixedPayload := range []bool{false, true} {
- expectedEventType := "test.event.type"
- expectedEventSource := "my.event.source"
- cet := NewCloudEventsTargeter("https://foo/bar", sizeRandomPayload, expectedEventType, expectedEventSource, fixedPayload)
- targeter := cet.VegetaTargeter()
-
- target1 := vegeta.Target{}
- if err := targeter(&target1); err != nil {
- t.Fatal("Targeter call returned error:", err)
- }
-
- nonEmptyHeaders := []string{"Ce-Id", "Ce-Type", "Ce-Source", "Ce-Specversion", "Content-Type"}
- for _, header := range nonEmptyHeaders {
- val, found := target1.Header[header]
- if !found {
- t.Fatal("Missing header:", header)
- } else if len(val) != 1 {
- t.Fatalf("Bad header(%s) length = %d, expected 1", header, len(val))
- }
- }
- ceType := target1.Header["Ce-Type"]
- if ceType[0] != expectedEventType {
- t.Errorf("Unexpected event type = %s, want %s", ceType, expectedEventType)
- }
- ceSource := target1.Header["Ce-Source"]
- if ceSource[0] != "my.event.source" {
- t.Errorf("Unexpected event type = %s, want %s", ceSource, expectedEventSource)
- }
- target2 := vegeta.Target{}
- if err := targeter(&target2); err != nil {
- t.Fatal("Targeter call returned error:", err)
- }
- if fixedPayload && !bytes.Equal(target1.Body, target2.Body) {
- t.Errorf("Target bodies differ, b1 = %v, b2 = %v", target1.Body, target2.Body)
- } else if !fixedPayload && bytes.Equal(target1.Body, target2.Body) {
- t.Error("Target bodies unexpectedly equal:", target1.Body)
- }
- ceID1 := target1.Header["Ce-Id"]
- ceID2 := target2.Header["Ce-Id"]
- if len(ceID1) == 1 && len(ceID2) == 1 {
- if ceID1[0] == ceID2[0] {
- t.Errorf("unexpectedly matching message ID's: %s, %s", ceID1, ceID2)
- }
- } else {
- t.Errorf("bad header id list lengths %d, %d, expected 1", len(ceID1), len(ceID2))
- }
- }
-}
-
-func BenchmarkTargeterFixedPayload(b *testing.B) {
- const sizeRandomPayload = 100
-
- cet := NewCloudEventsTargeter("https://foo/bar", sizeRandomPayload, "test.event.type", "my.event.source", true)
-
- targeter := cet.VegetaTargeter()
-
- for i := 0; i < b.N; i++ {
- target1 := vegeta.Target{}
- if err := targeter(&target1); err != nil {
- b.Fatalf("Targeter call returned error: %v", err)
- }
- }
-}
-
-func BenchmarkTargeterRandomPayload(b *testing.B) {
- const sizeRandomPayload = 100
-
- cet := NewCloudEventsTargeter("https://foo/bar", sizeRandomPayload, "test.event.type", "my.event.source", false)
-
- targeter := cet.VegetaTargeter()
-
- for i := 0; i < b.N; i++ {
- target1 := vegeta.Target{}
- if err := targeter(&target1); err != nil {
- b.Fatalf("Targeter call returned error: %v", err)
- }
- }
-}
diff --git a/test/performance/infra/sender/load_generator.go b/test/performance/infra/sender/load_generator.go
deleted file mode 100644
index 48d2817f6d4..00000000000
--- a/test/performance/infra/sender/load_generator.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package sender
-
-import (
- "knative.dev/eventing/test/performance/infra/common"
-)
-
-type LoadGenerator interface {
- // This method blocks till the warmup is complete
- Warmup(pace common.PaceSpec, msgSize uint, fixedBody bool)
-
- // This method blocks till the pace is complete
- RunPace(i int, pace common.PaceSpec, msgSize uint, fixedBody bool)
- SendGCEvent()
- SendEndEvent()
-}
-
-type LoadGeneratorFactory func(eventSource string, sentCh chan common.EventTimestamp,
- acceptedCh chan common.EventTimestamp) (LoadGenerator, error)
diff --git a/test/performance/infra/sender/request_interceptor.go b/test/performance/infra/sender/request_interceptor.go
deleted file mode 100644
index 1fd49656505..00000000000
--- a/test/performance/infra/sender/request_interceptor.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package sender
-
-import "net/http"
-
-type requestInterceptor struct {
- before func(*http.Request)
- transport http.RoundTripper
- after func(*http.Request, *http.Response, error)
-}
-
-func (r requestInterceptor) RoundTrip(request *http.Request) (*http.Response, error) {
- if r.before != nil {
- r.before(request)
- }
- res, err := r.transport.RoundTrip(request)
- if r.after != nil {
- r.after(request, res, err)
- }
- return res, err
-}
diff --git a/test/performance/infra/sender/request_interceptor_test.go b/test/performance/infra/sender/request_interceptor_test.go
deleted file mode 100644
index 02bed4beca1..00000000000
--- a/test/performance/infra/sender/request_interceptor_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package sender
-
-import (
- "net/http"
- "net/http/httptest"
- "testing"
-)
-
-func TestRequestInterceptor(t *testing.T) {
- ts := httptest.NewServer(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}))
- defer ts.Close()
-
- var calledBefore, calledAfter bool
-
- ti := requestInterceptor{
- before: func(*http.Request) {
- calledBefore = true
- },
- transport: http.DefaultTransport,
- after: func(*http.Request, *http.Response, error) {
- calledAfter = true
- },
- }
-
- tc := http.Client{Transport: ti}
-
- if _, err := tc.Get(ts.URL); err != nil {
- t.Fatal("Failed to send request to mock server:", err)
- }
-
- if !calledBefore || !calledAfter {
- t.Errorf("Expected calls to before and after funcs (before: %t, after: %t)", calledBefore, calledAfter)
- }
-}
diff --git a/test/performance/infra/sender/sender.go b/test/performance/infra/sender/sender.go
deleted file mode 100644
index ad2c30a36e9..00000000000
--- a/test/performance/infra/sender/sender.go
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package sender
-
-import (
- "context"
- "fmt"
- "log"
- "os"
- "runtime"
- "time"
-
- "github.com/golang/protobuf/ptypes/timestamp"
-
- "knative.dev/eventing/test/performance/infra/common"
- pb "knative.dev/eventing/test/performance/infra/event_state"
-)
-
-const (
- defaultEventSource = "perf-test-event-source"
- warmupRps = 100
- podNameEnvVar = "POD_NAME"
-)
-
-type Sender struct {
- paceSpecs []common.PaceSpec
- msgSize uint
- warmupSeconds uint
- fixedBody bool
-
- // EventTimestamp channels
- sentCh chan common.EventTimestamp
- acceptedCh chan common.EventTimestamp
-
- // events recording maps
- sentEvents *pb.EventsRecord
- acceptedEvents *pb.EventsRecord
-
- // load generator
- loadGenerator LoadGenerator
-
- // aggregator GRPC client
- aggregatorClient *pb.AggregatorClient
-}
-
-func NewSender(loadGeneratorFactory LoadGeneratorFactory, aggregAddr string, msgSize uint, warmupSeconds uint, paceFlag string, fixedBody bool) (common.Executor, error) {
- pacerSpecs, err := common.ParsePaceSpec(paceFlag)
- if err != nil {
- return nil, fmt.Errorf("failed to parse pace spec: %v", err)
- }
-
- // create a connection to the aggregator
- aggregatorClient, err := pb.NewAggregatorClient(aggregAddr)
- if err != nil {
- return nil, fmt.Errorf("failed to connect to the aggregator: %v", err)
- }
-
- // We need those estimates to allocate memory before benchmark starts
- estimatedNumberOfMessagesInsideAChannel, estimatedNumberOfTotalMessages := common.CalculateMemoryConstraintsForPaceSpecs(pacerSpecs)
-
- // Small note: receivedCh depends on receive thpt and not send thpt but we
- // don't care since this is a pessimistic estimate and receive thpt < send thpt
- // PS after 3 weeks: Yeah I know this is not an entirely true assumption after the system becomes
- // unstable, but we are interested to understand when the system becomes unstable,
- // not what happens after
-
- executor := &Sender{
- msgSize: msgSize,
- warmupSeconds: warmupSeconds,
- paceSpecs: pacerSpecs,
- fixedBody: fixedBody,
-
- sentCh: make(chan common.EventTimestamp, estimatedNumberOfMessagesInsideAChannel),
- acceptedCh: make(chan common.EventTimestamp, estimatedNumberOfMessagesInsideAChannel),
-
- sentEvents: &pb.EventsRecord{
- Type: pb.EventsRecord_SENT,
- Events: make(map[string]*timestamp.Timestamp, estimatedNumberOfTotalMessages),
- },
- acceptedEvents: &pb.EventsRecord{
- Type: pb.EventsRecord_ACCEPTED,
- Events: make(map[string]*timestamp.Timestamp, estimatedNumberOfTotalMessages),
- },
-
- aggregatorClient: aggregatorClient,
- }
-
- executor.loadGenerator, err = loadGeneratorFactory(eventsSource(), executor.sentCh, executor.acceptedCh)
- if err != nil {
- return nil, err
- }
-
- return executor, nil
-}
-
-func (s *Sender) Run(ctx context.Context) {
- // --- Warmup phase
- log.Printf("--- BEGIN WARMUP ---")
- if s.warmupSeconds > 0 {
- if err := s.warmup(ctx, s.warmupSeconds); err != nil {
- log.Fatalf("Failed to run warmup: %v", err)
- }
- } else {
- log.Printf("Warmup skipped")
- }
- log.Printf("---- END WARMUP ----")
-
- log.Printf("--- BEGIN BENCHMARK ---")
-
- // Start the events processor
- log.Println("Starting events processor")
- go s.processEvents()
-
- // Clean mess before starting
- runtime.GC()
-
- log.Println("Starting benchmark")
-
- // Run all pace configurations
- benchmarkBeginning := time.Now()
- for i, pace := range s.paceSpecs {
- log.Printf("Starting pace %d° at %v rps for %v seconds", i+1, pace.Rps, pace.Duration)
- s.loadGenerator.RunPace(i, pace, s.msgSize, s.fixedBody)
-
- // Wait for flush
- time.Sleep(common.WaitForFlush)
-
- // Trigger GC
- log.Println("Triggering GC")
- s.loadGenerator.SendGCEvent()
- runtime.GC()
-
- // Wait for receivers GC
- time.Sleep(common.WaitForReceiverGC)
- }
-
- s.loadGenerator.SendEndEvent()
-
- log.Printf("Benchmark completed in %v", time.Since(benchmarkBeginning))
-
- s.closeChannels()
-
- log.Println("---- END BENCHMARK ----")
-
- log.Println("Sending collected data to the aggregator")
-
- log.Printf("%-15s: %d", "Sent count", len(s.sentEvents.Events))
- log.Printf("%-15s: %d", "Accepted count", len(s.acceptedEvents.Events))
-
- err := s.aggregatorClient.Publish(&pb.EventsRecordList{Items: []*pb.EventsRecord{
- s.sentEvents,
- s.acceptedEvents,
- }})
- if err != nil {
- log.Fatalf("Failed to send events record: %v\n", err)
- }
-}
-
-func (s *Sender) warmup(ctx context.Context, warmupSeconds uint) error {
- log.Println("Starting warmup")
-
- s.loadGenerator.Warmup(common.PaceSpec{Rps: warmupRps, Duration: time.Duration(warmupSeconds) * time.Second}, s.msgSize, s.fixedBody)
-
- // give the channel some time to drain the events it may still have enqueued
- time.Sleep(common.WaitAfterWarmup)
-
- return nil
-}
-
-func (s *Sender) closeChannels() {
- log.Printf("All requests sent")
-
- close(s.sentCh)
- close(s.acceptedCh)
-
- log.Printf("All channels closed")
-}
-
-// processEvents keeps a record of all events (sent, accepted, failed, received).
-func (s *Sender) processEvents() {
- for {
- select {
- case e, ok := <-s.sentCh:
- if !ok {
- continue
- }
- s.sentEvents.Events[e.EventId] = e.At
-
- case e, ok := <-s.acceptedCh:
- if !ok {
- continue
- }
- s.acceptedEvents.Events[e.EventId] = e.At
- }
- }
-}
-
-func eventsSource() string {
- if pn := os.Getenv(podNameEnvVar); pn != "" {
- return pn
- }
- return defaultEventSource
-}
diff --git a/test/performance/latency-and-thpt-plot.plg b/test/performance/latency-and-thpt-plot.plg
deleted file mode 100644
index 87c251ba302..00000000000
--- a/test/performance/latency-and-thpt-plot.plg
+++ /dev/null
@@ -1,14 +0,0 @@
-csv_file = ARG1
-latency_upper_bound = ARG2
-thpt_lower_bound = ARG3
-thpt_upper_bound = ARG4
-set datafile separator comma
-set y2tics thpt_lower_bound,thpt_upper_bound
-set ytics nomirror
-set y2range[thpt_lower_bound:thpt_upper_bound]
-plot csv_file u 1:($3 < latency_upper_bound ? $3 : 1/0) axis x1y1 title "Publish latency",\
- csv_file u 1:($6 < latency_upper_bound ? $6 : 1/0) axis x1y1 title "Receiver latency",\
- csv_file u 1:($4 < latency_upper_bound ? $4 : 1/0) axis x1y1 title "End to End latency",\
- csv_file u 1:($5 > thpt_lower_bound && $5 < thpt_upper_bound ? $5 : 1/0) with lines axis x1y2 title "Sender throughput",\
- csv_file u 1:($8 > thpt_lower_bound && $8 < thpt_upper_bound ? $8 : 1/0) with lines axis x1y2 title "Receiver throughput"
-pause -1
diff --git a/test/performance/latency-plot.plg b/test/performance/latency-plot.plg
deleted file mode 100644
index a60fa266099..00000000000
--- a/test/performance/latency-plot.plg
+++ /dev/null
@@ -1,6 +0,0 @@
-csv_file = ARG1
-set datafile separator comma
-plot csv_file u 1:3 title "Publish latency",\
- csv_file u 1:4 title "End to End latency",\
- csv_file u 1:6 title "Receiver latency",
-pause -1
diff --git a/test/performance/performance-tests.sh b/test/performance/performance-tests.sh
deleted file mode 100755
index 2519480c3c3..00000000000
--- a/test/performance/performance-tests.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2020 The Knative Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# performance-tests.sh is added to manage all clusters that run the performance
-# benchmarks in eventing repo, it is ONLY intended to be run by Prow, users
-# should NOT run it manually.
-
-# Setup env vars to override the default settings
-export PROJECT_NAME="knative-eventing-performance"
-export BENCHMARK_ROOT_PATH="$GOPATH/src/knative.dev/eventing/test/performance/benchmarks"
-
-source vendor/knative.dev/hack/performance-tests.sh
-
-# Vars used in this script
-export TEST_CONFIG_VARIANT="continuous"
-export TEST_NAMESPACE="default"
-
-function update_knative() {
- echo ">> Update eventing core"
- ko apply --selector knative.dev/crd-install=true \
- -f config/ || abort "Failed to apply eventing CRDs"
-
- ko apply \
- -f config/ || abort "Failed to apply eventing resources"
-
- echo ">> Update InMemoryChannel"
- ko apply --selector knative.dev/crd-install=true \
- -f config/channels/in-memory-channel/ || abort "Failed to apply InMemoryChannel CRDs"
-
- ko apply \
- -f config/channels/in-memory-channel/ || abort "Failed to apply InMemoryChannel resources"
-
- echo ">> Update Broker"
- ko apply --selector knative.dev/crd-install=true \
- -f config/brokers/mt-channel-broker || abort "Failed to apply Broker CRD"
- ko apply \
- -f config/brokers/mt-channel-broker || abort "Failed to apply Broker resources"
-}
-
-function update_benchmark() {
- local benchmark_path="${BENCHMARK_ROOT_PATH}/$1"
- # TODO(chizhg): add update_environment function in test-infra/scripts/performance-tests.sh and move the below code there
- echo ">> Updating configmap"
- kubectl delete configmap config-mako -n "${TEST_NAMESPACE}" --ignore-not-found=true
- kubectl create configmap config-mako -n "${TEST_NAMESPACE}" --from-file="${benchmark_path}/prod.config" || abort "failed to create config-mako configmap"
- kubectl patch configmap config-mako -n "${TEST_NAMESPACE}" -p '{"data":{"environment":"prod"}}' || abort "failed to patch config-mako configmap"
-
- echo ">> Updating benchmark $1"
- # TODO(chizhg): remove --wait=false once https://github.com/knative/eventing/issues/2633 is fixed
- ko delete -f "${benchmark_path}"/${TEST_CONFIG_VARIANT} --ignore-not-found=true --wait=false
- ko apply -f "${benchmark_path}"/${TEST_CONFIG_VARIANT} || abort "failed to apply benchmark $1"
-}
-
-main $@
diff --git a/test/performance/sample-dev.config b/test/performance/sample-dev.config
deleted file mode 100644
index e5c39322e9c..00000000000
--- a/test/performance/sample-dev.config
+++ /dev/null
@@ -1,75 +0,0 @@
-### TODO: This is a sample Mako dev config. Copy this to your benchmark
-# directory and complete the TODOs.
-
-# Create this benchmark with the mako tool: mako create_benchmark dev.config
-# Update this benchmark with the mako tool: mako update_benchmark dev.config
-# Learn more about the mako tool at
-# https://github.com/google/mako/blob/master/docs/CLI.md.
-
-project_name: "Knative"
-# TODO: Update the benchmark name and description to describe its purpose. Since
-# this is a dev benchmark, keep the Development prefix in the name.
-benchmark_name: "Development - Sample Benchmark"
-description: "Measure latency and throughput of the sample component."
-# TODO: Fill in with the key returned by Mako when the benchmark is created.
-benchmark_key: ''
-
-# Human owners that can update the benchmark.
-owner_list: "grantrodgers@google.com"
-owner_list: "chizhg@google.com"
-owner_list: "xiyue@google.com"
-owner_list: "gracegao@google.com"
-owner_list: "nachocano@google.com"
-owner_list: "cshou@google.com"
-# TODO: If you want to update the benchmark config using the mako tool, add
-# your email to this list.
-
-# GCP Service Accounts that can publish data to Mako.
-owner_list: "mako-job@knative-performance.iam.gserviceaccount.com"
-owner_list: "mako-upload@grantrodgers-crd.iam.gserviceaccount.com"
-owner_list: "mako-upload@xiyue-knative-project.iam.gserviceaccount.com"
-owner_list: "mako-upload@gracegao-knative-gcp-testing.iam.gserviceaccount.com"
-owner_list: "mako-upload@knative-project-228222.iam.gserviceaccount.com"
-owner_list: "mako-upload@cshou-playground.iam.gserviceaccount.com"
-# TODO: If you want to run the benchmark and submit data using your own service
-# account, add its email to this list.
-
-# TODO: Replace the following with the equivalent configuration from
-# test/test_images/performance/mako.config. This version is likely obsolete.
-
-# Define the name and type for x-axis of run charts
-# input_value_info: {
-# value_key: "t"
-# label: "time"
-# type: TIMESTAMP
-# }
-#
-# Note: value_key is stored repeatedly and should be very short (ideally one or two characters).
-# metric_info_list: {
-# value_key: "pl"
-# label: "publish-latency"
-# }
-# metric_info_list: {
-# value_key: "pe"
-# label: "publish-errors"
-# }
-# metric_info_list: {
-# value_key: "st"
-# label: "send-throughput"
-# }
-# metric_info_list: {
-# value_key: "dl"
-# label: "deliver-latency"
-# }
-# metric_info_list: {
-# value_key: "de"
-# label: "deliver-errors"
-# }
-# metric_info_list: {
-# value_key: "dt"
-# label: "deliver-throughput"
-# }
-# metric_info_list: {
-# value_key: "ft"
-# label: "failure-throughput"
-# }
diff --git a/test/performance/sample-prod.config b/test/performance/sample-prod.config
deleted file mode 100644
index feb9ea8326f..00000000000
--- a/test/performance/sample-prod.config
+++ /dev/null
@@ -1,68 +0,0 @@
-### TODO: This is a sample Mako prod config. Copy this to your benchmark
-# directory and complete the TODOs.
-
-# Create this benchmark with the mako tool: mako create_benchmark prod.config
-# Update this benchmark with the mako tool: mako update_benchmark prod.config
-# Learn more about the mako tool at
-# https://github.com/google/mako/blob/master/docs/CLI.md.
-
-project_name: "Knative"
-# TODO: Update the benchmark name and description to describe its purpose.
-benchmark_name: "Sample Benchmark"
-description: "Measure latency and throughput of the sample component."
-# TODO: Fill in with the key returned by Mako when the benchmark is created.
-benchmark_key: ''
-
-# Human owners that can update the benchmark.
-owner_list: "grantrodgers@google.com"
-owner_list: "chizhg@google.com"
-owner_list: "xiyue@google.com"
-owner_list: "gracegao@google.com"
-owner_list: "nachocano@google.com"
-owner_list: "cshou@google.com"
-# TODO: If you want to update the benchmark config using the mako tool, add
-# your email to this list.
-
-# GCP Service Accounts that can publish data to Mako. Since this is a prod
-# benchmark, only the CI account should be listed here.
-owner_list: "mako-job@knative-performance.iam.gserviceaccount.com"
-
-# TODO: Replace the following with the equivalent configuration from
-# test/test_images/performance/mako.config. This version is likely obsolete.
-
-# Define the name and type for x-axis of run charts
-# input_value_info: {
-# value_key: "t"
-# label: "time"
-# type: TIMESTAMP
-# }
-#
-# Note: value_key is stored repeatedly and should be very short (ideally one or two characters).
-# metric_info_list: {
-# value_key: "pl"
-# label: "publish-latency"
-# }
-# metric_info_list: {
-# value_key: "pe"
-# label: "publish-errors"
-# }
-# metric_info_list: {
-# value_key: "st"
-# label: "send-throughput"
-# }
-# metric_info_list: {
-# value_key: "dl"
-# label: "deliver-latency"
-# }
-# metric_info_list: {
-# value_key: "de"
-# label: "deliver-errors"
-# }
-# metric_info_list: {
-# value_key: "dt"
-# label: "deliver-throughput"
-# }
-# metric_info_list: {
-# value_key: "ft"
-# label: "failure-throughput"
-# }
diff --git a/test/performance/thpt-plot.plg b/test/performance/thpt-plot.plg
deleted file mode 100644
index b483267b4fe..00000000000
--- a/test/performance/thpt-plot.plg
+++ /dev/null
@@ -1,4 +0,0 @@
-csv_file = ARG1
-set datafile separator comma
-plot csv_file u 1:5 with lines title "Sender throughput", csv_file u 1:8 with lines title "Receiver throughput"
-pause -1
diff --git a/test/test_images/performance/OWNERS b/test/test_images/performance/OWNERS
deleted file mode 100644
index 6dd1e299bf0..00000000000
--- a/test/test_images/performance/OWNERS
+++ /dev/null
@@ -1,12 +0,0 @@
-# The OWNERS file is used by prow to automatically merge approved PRs.
-
-approvers:
-- productivity-writers
-- slinkydeveloper
-
-reviewers:
-- productivity-writers
-- slinkydeveloper
-
-labels:
-- area/performance
diff --git a/test/test_images/performance/README.md b/test/test_images/performance/README.md
deleted file mode 100644
index 715e93c210b..00000000000
--- a/test/test_images/performance/README.md
+++ /dev/null
@@ -1,125 +0,0 @@
-# Performance test image
-
-This image is designed to benchmark Knative Eventing channel/brokers.
-
-The image contains three different functionality, configurable with `--roles`
-flag:
-
-- `sender`: Act as sender
-- `receiver`: Act as receiver
-- `aggregator`: Act as aggregator of timestamps
-
-The image does both the sender and receiver role, allowing the clock to be
-synchronized to correctly calculate latencies (only valid with a single
-sender-receiver).
-
-Latencies are calculated and published to the Mako sidecar container by a
-separate aggregator. In particular:
-
-- Publish latency represents how long the sender took to publish the event to
- its sink
-- Delivery latency represents how much long the event took to go from the
- "intent" of publish (before publishing request happens) to the delivery of the
- event in the receiver
-
-Throughputs are calculated doing the finite difference of timestamp - count
-vector. In particular:
-
-- Send throughput are calculated from the timestamps stored before the publish
- request
-- Delivery throughput are calculated from the timestamps stored when the message
- are received
-- Failures throughput are calculated from the timestamps / failures vector
-
-The image is designed to allocate as much memory as possible before the
-benchmark starts. We suggest to disable Go GC to avoid useless GC pauses.
-
-## Usage
-
-Example of how to use this image with the Mako stub sidecar:
-
-```yaml
-apiVersion: v1
-kind: Pod
-metadata:
- name: latency-test
- namespace: perf-eventing
- labels:
- role: latency-test-consumer
-spec:
- serviceAccountName: default
- restartPolicy: Never
- containers:
- - name: latency-test
- image: knative.dev/eventing/test/test_images/performance
- resources:
- requests:
- cpu: 1000m
- memory: 2Gi
- ports:
- - name: cloudevents
- containerPort: 8080
- args:
- - "--roles=sender,receiver"
- - "--sink=http://in-memory-test-broker-broker.perf-eventing.svc.cluster.local"
- - "--aggregator=localhost:10000"
- - "--pace=100:10,200:20,400:60"
- - "--warmup=10"
- env:
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- volumeMounts:
- - name: config-mako
- mountPath: /etc/config-mako
- terminationMessagePolicy: FallbackToLogsOnError
- - name: aggregator
- image: knative.dev/eventing/test/test_images/performance
- ports:
- - name: grpc
- containerPort: 10000
- args:
- - "--roles=aggregator"
- - "--expect-records=2"
- terminationMessagePolicy: FallbackToLogsOnError
- - name: mako-stub
- image: knative.dev/pkg/test/mako/stub-sidecar
- terminationMessagePolicy: FallbackToLogsOnError
- volumes:
- - name: config-mako
- configMap:
- name: config-mako
-```
-
-### Pace configuration
-
-`pace` is a comma separated array of pace configurations in format
-`rps[:duration=10s]`.
-
-For example the configuration `100,200:20,400:60` means:
-
-1. 100 rps for 10 seconds
-2. 200 rps for 20 seconds
-3. 400 rps for 60 seconds
-
-### Warmup phase
-
-You can configure a warmup phase to warm the hot path of channel
-implementations. This is especially required while working with JVM or similar
-environments. During the warmup phase, no latencies are calculated.
-
-To configure the duration of warmup phase, use flag `warmup` specifying the
-number of seconds.
-
-If you don't want a warmup phase, use `--warmup=0`.
-
-### Workers
-
-You can specify the number of initial vegeta workers that perform requests with
-flag `workers`.
-
-### Aggregator configuration
-
-`--expect-records` must be equal to number sender + number receivers. If a same
-instance does both the sender and receiver, it counts twice
diff --git a/test/test_images/performance/kodata/HEAD b/test/test_images/performance/kodata/HEAD
deleted file mode 120000
index 481bd4eff49..00000000000
--- a/test/test_images/performance/kodata/HEAD
+++ /dev/null
@@ -1 +0,0 @@
-../../../../.git/HEAD
\ No newline at end of file
diff --git a/test/test_images/performance/kodata/refs b/test/test_images/performance/kodata/refs
deleted file mode 120000
index fe164fe40f7..00000000000
--- a/test/test_images/performance/kodata/refs
+++ /dev/null
@@ -1 +0,0 @@
-../../../../.git/refs
\ No newline at end of file
diff --git a/test/test_images/performance/main.go b/test/test_images/performance/main.go
deleted file mode 100644
index 8ef09d27be1..00000000000
--- a/test/test_images/performance/main.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
- "flag"
-
- "knative.dev/eventing/test/performance/infra"
- "knative.dev/eventing/test/performance/infra/receiver"
- "knative.dev/eventing/test/performance/infra/sender"
-)
-
-var minWorkers uint64
-var sinkURL string
-
-func init() {
- infra.DeclareFlags()
-
- // Specific to http load generator
- flag.Uint64Var(&minWorkers, "min-workers", 10, "Number of vegeta workers")
- flag.StringVar(&sinkURL, "sink", "", "The sink URL for the event destination.")
-}
-
-func main() {
- flag.Parse()
-
- infra.StartPerformanceImage(sender.NewHTTPLoadGeneratorFactory(sinkURL, minWorkers), receiver.EventTypeExtractor, receiver.EventIdExtractor)
-}
diff --git a/test/test_images/performance/mako.config b/test/test_images/performance/mako.config
deleted file mode 100644
index 77263069221..00000000000
--- a/test/test_images/performance/mako.config
+++ /dev/null
@@ -1,51 +0,0 @@
-project_name: "Knative"
-benchmark_name: "Generic Eventing Benchmark"
-description: "Measure latency and throughput of an eventing component."
-
-### ATTENTION: This is a sample benchmark config. Do not attempt to create or
-# update this benchmark with the mako tool. See
-test/performance/sample-dev.config for an example of a real benchmark config.
-
-# Any changes made below this comment must be copied to all
-# benchmark config files in test/performance/benchmarks.
-
-# Define the name and type for x-axis of run charts
-input_value_info: {
- value_key: "t"
- label: "time"
- type: TIMESTAMP
-}
-
-# Note: value_key is stored repeatedly and should be very short (ideally one or two characters).
-metric_info_list: {
- value_key: "pl"
- label: "publish-latency"
-}
-metric_info_list: {
- value_key: "pe"
- label: "publish-errors"
-}
-metric_info_list: {
- value_key: "st"
- label: "send-throughput"
-}
-metric_info_list: {
- value_key: "dl"
- label: "deliver-latency"
-}
-metric_info_list: {
- value_key: "de"
- label: "deliver-errors"
-}
-metric_info_list: {
- value_key: "dt"
- label: "deliver-throughput"
-}
-metric_info_list: {
- value_key: "pet"
- label: "publish-failure-throughput"
-}
-metric_info_list: {
- value_key: "det"
- label: "deliver-failure-throughput"
-}
diff --git a/test/test_images/performance/pod.yaml b/test/test_images/performance/pod.yaml
deleted file mode 100644
index dbe1a975aed..00000000000
--- a/test/test_images/performance/pod.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-# This file is required to trigger ko resolve in upload-test-images.sh
-# To run the performance image, look at test/performance/README.md
-
-apiVersion: v1
-kind: Pod
-metadata:
- name: performance
-spec:
- containers:
- - name: performance
- image: ko://knative.dev/eventing/test/test_images/performance
-
diff --git a/third_party/VENDOR-LICENSE/github.com/golang/glog/LICENSE b/third_party/VENDOR-LICENSE/github.com/golang/glog/LICENSE
deleted file mode 100644
index 37ec93a14fd..00000000000
--- a/third_party/VENDOR-LICENSE/github.com/golang/glog/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/third_party/VENDOR-LICENSE/github.com/google/go-github/v27/github/LICENSE b/third_party/VENDOR-LICENSE/github.com/google/go-github/v27/github/LICENSE
deleted file mode 100644
index 28b6486f0b8..00000000000
--- a/third_party/VENDOR-LICENSE/github.com/google/go-github/v27/github/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2013 The go-github AUTHORS. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/VENDOR-LICENSE/github.com/google/go-querystring/query/LICENSE b/third_party/VENDOR-LICENSE/github.com/google/go-querystring/query/LICENSE
deleted file mode 100644
index ae121a1e46d..00000000000
--- a/third_party/VENDOR-LICENSE/github.com/google/go-querystring/query/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2013 Google. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/VENDOR-LICENSE/github.com/google/mako/LICENSE b/third_party/VENDOR-LICENSE/github.com/google/mako/LICENSE
deleted file mode 100644
index fef7d967815..00000000000
--- a/third_party/VENDOR-LICENSE/github.com/google/mako/LICENSE
+++ /dev/null
@@ -1,204 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
\ No newline at end of file
diff --git a/third_party/VENDOR-LICENSE/github.com/influxdata/tdigest/LICENSE b/third_party/VENDOR-LICENSE/github.com/influxdata/tdigest/LICENSE
deleted file mode 100644
index ebb2bfb1aa8..00000000000
--- a/third_party/VENDOR-LICENSE/github.com/influxdata/tdigest/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2018 InfluxData Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/third_party/VENDOR-LICENSE/github.com/rogpeppe/fastuuid/LICENSE b/third_party/VENDOR-LICENSE/github.com/rogpeppe/fastuuid/LICENSE
deleted file mode 100644
index 9525fc82565..00000000000
--- a/third_party/VENDOR-LICENSE/github.com/rogpeppe/fastuuid/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright © 2014, Roger Peppe
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
- * Neither the name of this project nor the names of its contributors
- may be used to endorse or promote products derived from this software
- without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/VENDOR-LICENSE/github.com/rs/dnscache/LICENSE b/third_party/VENDOR-LICENSE/github.com/rs/dnscache/LICENSE
deleted file mode 100644
index 71abfee39c7..00000000000
--- a/third_party/VENDOR-LICENSE/github.com/rs/dnscache/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2018 Olivier Poitrey
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/third_party/VENDOR-LICENSE/github.com/tsenart/vegeta/v12/lib/LICENSE b/third_party/VENDOR-LICENSE/github.com/tsenart/vegeta/v12/lib/LICENSE
deleted file mode 100644
index d396b44b381..00000000000
--- a/third_party/VENDOR-LICENSE/github.com/tsenart/vegeta/v12/lib/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013-2023 Tomás Senart
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index 2da498b8e7a..435252c7e95 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,6 +1,25 @@
# Changes
+## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.41.0...storage/v1.42.0) (2024-06-10)
+
+
+### Features
+
+* **storage:** Add new package transfermanager. This package is intended for parallel uploads and downloads, and is in preview. It is not stable, and is likely to change. ([#10045](https://github.com/googleapis/google-cloud-go/issues/10045)) ([cde5cbb](https://github.com/googleapis/google-cloud-go/commit/cde5cbba3145d5a702683656a42158621234fe71))
+* **storage:** Add bucket HierarchicalNamespace ([#10315](https://github.com/googleapis/google-cloud-go/issues/10315)) ([b92406c](https://github.com/googleapis/google-cloud-go/commit/b92406ccfadfdcee379e86d6f78c901d772401a9)), refs [#10146](https://github.com/googleapis/google-cloud-go/issues/10146)
+* **storage:** Add BucketName to BucketHandle ([#10127](https://github.com/googleapis/google-cloud-go/issues/10127)) ([203cc59](https://github.com/googleapis/google-cloud-go/commit/203cc599e5e2f2f821dc75b47c5a4c9073333f05))
+
+
+### Bug Fixes
+
+* **storage:** Set invocation headers on xml reads ([#10250](https://github.com/googleapis/google-cloud-go/issues/10250)) ([c87e1ab](https://github.com/googleapis/google-cloud-go/commit/c87e1ab6f9618b8b3f4d0005ac159abd87b0daaf))
+
+
+### Documentation
+
+* **storage:** Update autoclass doc ([#10135](https://github.com/googleapis/google-cloud-go/issues/10135)) ([e4b2737](https://github.com/googleapis/google-cloud-go/commit/e4b2737ddc16d3bf8139a6def7326ac905f62acd))
+
## [1.41.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.40.0...storage/v1.41.0) (2024-05-13)
diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go
index 74799e55e98..560a5605d0b 100644
--- a/vendor/cloud.google.com/go/storage/acl.go
+++ b/vendor/cloud.google.com/go/storage/acl.go
@@ -16,8 +16,6 @@ package storage
import (
"context"
- "net/http"
- "reflect"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
@@ -162,15 +160,6 @@ func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...)
}
-func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
- vc := reflect.ValueOf(call)
- vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
- if a.userProject != "" {
- vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
- }
- setClientHeader(call.Header())
-}
-
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
index d2da86e914b..e69d1e61e28 100644
--- a/vendor/cloud.google.com/go/storage/bucket.go
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -116,6 +116,11 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
return &b.defaultObjectACL
}
+// BucketName returns the name of the bucket.
+func (b *BucketHandle) BucketName() string {
+ return b.name
+}
+
// Object returns an ObjectHandle, which provides operations on the named object.
// This call does not perform any network operations such as fetching the object or verifying its existence.
// Use methods on ObjectHandle to perform network operations.
@@ -486,6 +491,13 @@ type BucketAttrs struct {
// 7 day retention duration. In order to fully disable soft delete, you need
// to set a policy with a RetentionDuration of 0.
SoftDeletePolicy *SoftDeletePolicy
+
+ // HierarchicalNamespace contains the bucket's hierarchical namespace
+ // configuration. Hierarchical namespace enabled buckets can contain
+ // [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources.
+ // It cannot be modified after bucket creation time.
+ // UniformBucketLevelAccess must also also be enabled on the bucket.
+ HierarchicalNamespace *HierarchicalNamespace
}
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@@ -767,6 +779,7 @@ type Autoclass struct {
// TerminalStorageClass: The storage class that objects in the bucket
// eventually transition to if they are not read for a certain length of
// time. Valid values are NEARLINE and ARCHIVE.
+ // To modify TerminalStorageClass, Enabled must be set to true.
TerminalStorageClass string
// TerminalStorageClassUpdateTime represents the time of the most recent
// update to "TerminalStorageClass".
@@ -786,6 +799,15 @@ type SoftDeletePolicy struct {
RetentionDuration time.Duration
}
+// HierarchicalNamespace contains the bucket's hierarchical namespace
+// configuration. Hierarchical namespace enabled buckets can contain
+// [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources.
+type HierarchicalNamespace struct {
+ // Enabled indicates whether hierarchical namespace features are enabled on
+ // the bucket. This can only be set at bucket creation time currently.
+ Enabled bool
+}
+
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if b == nil {
return nil, nil
@@ -824,6 +846,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig),
Autoclass: toAutoclassFromRaw(b.Autoclass),
SoftDeletePolicy: toSoftDeletePolicyFromRaw(b.SoftDeletePolicy),
+ HierarchicalNamespace: toHierarchicalNamespaceFromRaw(b.HierarchicalNamespace),
}, nil
}
@@ -858,6 +881,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based
Autoclass: toAutoclassFromProto(b.GetAutoclass()),
SoftDeletePolicy: toSoftDeletePolicyFromProto(b.SoftDeletePolicy),
+ HierarchicalNamespace: toHierarchicalNamespaceFromProto(b.HierarchicalNamespace),
}
}
@@ -914,6 +938,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(),
Autoclass: b.Autoclass.toRawAutoclass(),
SoftDeletePolicy: b.SoftDeletePolicy.toRawSoftDeletePolicy(),
+ HierarchicalNamespace: b.HierarchicalNamespace.toRawHierarchicalNamespace(),
}
}
@@ -975,6 +1000,7 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(),
Autoclass: b.Autoclass.toProtoAutoclass(),
SoftDeletePolicy: b.SoftDeletePolicy.toProtoSoftDeletePolicy(),
+ HierarchicalNamespace: b.HierarchicalNamespace.toProtoHierarchicalNamespace(),
}
}
@@ -1174,6 +1200,9 @@ type BucketAttrsToUpdate struct {
RPO RPO
// If set, updates the autoclass configuration of the bucket.
+ // To disable autoclass on the bucket, set to an empty &Autoclass{}.
+ // To update the configuration for Autoclass.TerminalStorageClass,
+ // Autoclass.Enabled must also be set to true.
// See https://cloud.google.com/storage/docs/using-autoclass for more information.
Autoclass *Autoclass
@@ -2136,6 +2165,42 @@ func toSoftDeletePolicyFromProto(p *storagepb.Bucket_SoftDeletePolicy) *SoftDele
}
}
+func (hns *HierarchicalNamespace) toProtoHierarchicalNamespace() *storagepb.Bucket_HierarchicalNamespace {
+ if hns == nil {
+ return nil
+ }
+ return &storagepb.Bucket_HierarchicalNamespace{
+ Enabled: hns.Enabled,
+ }
+}
+
+func (hns *HierarchicalNamespace) toRawHierarchicalNamespace() *raw.BucketHierarchicalNamespace {
+ if hns == nil {
+ return nil
+ }
+ return &raw.BucketHierarchicalNamespace{
+ Enabled: hns.Enabled,
+ }
+}
+
+func toHierarchicalNamespaceFromProto(p *storagepb.Bucket_HierarchicalNamespace) *HierarchicalNamespace {
+ if p == nil {
+ return nil
+ }
+ return &HierarchicalNamespace{
+ Enabled: p.Enabled,
+ }
+}
+
+func toHierarchicalNamespaceFromRaw(r *raw.BucketHierarchicalNamespace) *HierarchicalNamespace {
+ if r == nil {
+ return nil
+ }
+ return &HierarchicalNamespace{
+ Enabled: r.Enabled,
+ }
+}
+
// Objects returns an iterator over the objects in the bucket that match the
// Query q. If q is nil, no filtering is done. Objects will be iterated over
// lexicographically by name.
diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go
index 1b9fbe9dd20..f7811a5d140 100644
--- a/vendor/cloud.google.com/go/storage/hmac.go
+++ b/vendor/cloud.google.com/go/storage/hmac.go
@@ -272,7 +272,6 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string,
// TODO: Remove fetch method upon integration. This method is internalized into
// httpStorageClient.ListHMACKeys() as it is the only caller.
call := it.raw.List(it.projectID)
- setClientHeader(call.Header())
if pageToken != "" {
call = call.PageToken(pageToken)
}
diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go
index e01ae9c4284..0e213a6632a 100644
--- a/vendor/cloud.google.com/go/storage/http_client.go
+++ b/vendor/cloud.google.com/go/storage/http_client.go
@@ -176,7 +176,6 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st
bkt.Location = "US"
}
req := c.raw.Buckets.Insert(project, bkt)
- setClientHeader(req.Header())
if attrs != nil && attrs.PredefinedACL != "" {
req.PredefinedAcl(attrs.PredefinedACL)
}
@@ -207,7 +206,6 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt
fetch := func(pageSize int, pageToken string) (token string, err error) {
req := c.raw.Buckets.List(it.projectID)
- setClientHeader(req.Header())
req.Projection("full")
req.Prefix(it.Prefix)
req.PageToken(pageToken)
@@ -245,7 +243,6 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt
func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
s := callSettings(c.settings, opts...)
req := c.raw.Buckets.Delete(bucket)
- setClientHeader(req.Header())
if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil {
return err
}
@@ -259,7 +256,6 @@ func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, con
func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
req := c.raw.Buckets.Get(bucket).Projection("full")
- setClientHeader(req.Header())
err := applyBucketConds("httpStorageClient.GetBucket", conds, req)
if err != nil {
return nil, err
@@ -287,7 +283,6 @@ func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
s := callSettings(c.settings, opts...)
rb := uattrs.toRawBucket()
req := c.raw.Buckets.Patch(bucket, rb).Projection("full")
- setClientHeader(req.Header())
err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req)
if err != nil {
return nil, err
@@ -340,7 +335,6 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
if it.query.SoftDeleted {
req.SoftDeleted(it.query.SoftDeleted)
}
- setClientHeader(req.Header())
projection := it.query.Projection
if projection == ProjectionDefault {
projection = ProjectionFull
@@ -666,7 +660,7 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string,
}, s.retry, s.idempotent)
}
-// configureACLCall sets the context, user project and headers on the apiary library call.
+// configureACLCall sets the context and user project on the apiary library call.
// This will panic if the call does not have the correct methods.
func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) {
vc := reflect.ValueOf(call)
@@ -674,7 +668,6 @@ func configureACLCall(ctx context.Context, userProject string, call interface{ H
if userProject != "" {
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)})
}
- setClientHeader(call.Header())
}
// Object ACL methods.
@@ -760,7 +753,6 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec
return nil, err
}
var obj *raw.Object
- setClientHeader(call.Header())
var err error
retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }
@@ -809,7 +801,6 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
var res *raw.RewriteResponse
var err error
- setClientHeader(call.Header())
retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err }
@@ -864,17 +855,18 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa
return nil, err
}
- // Set custom headers passed in via the context. This is only required for XML;
- // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively.
- ctxHeaders := callctx.HeadersFromContext(ctx)
- for k, vals := range ctxHeaders {
- for _, v := range vals {
- req.Header.Add(k, v)
- }
- }
-
reopen := readerReopen(ctx, req.Header, params, s,
- func(ctx context.Context) (*http.Response, error) { return c.hc.Do(req.WithContext(ctx)) },
+ func(ctx context.Context) (*http.Response, error) {
+ // Set custom headers passed in via the context. This is only required for XML;
+ // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively.
+ ctxHeaders := callctx.HeadersFromContext(ctx)
+ for k, vals := range ctxHeaders {
+ for _, v := range vals {
+ req.Header.Set(k, v)
+ }
+ }
+ return c.hc.Do(req.WithContext(ctx))
+ },
func() error { return setConditionsHeaders(req.Header, params.conds) },
func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) })
@@ -888,7 +880,6 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa
func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) {
call := c.raw.Objects.Get(params.bucket, params.object)
- setClientHeader(call.Header())
call.Projection("full")
if s.userProject != "" {
@@ -1004,7 +995,6 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage
func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) {
s := callSettings(c.settings, opts...)
call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version))
- setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
@@ -1025,7 +1015,6 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p
rp := iamToStoragePolicy(policy)
call := c.raw.Buckets.SetIamPolicy(resource, rp)
- setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
@@ -1039,7 +1028,6 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p
func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) {
s := callSettings(c.settings, opts...)
call := c.raw.Buckets.TestIamPermissions(resource, permissions)
- setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
@@ -1088,7 +1076,6 @@ func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc
}
fetch := func(pageSize int, pageToken string) (token string, err error) {
call := c.raw.Projects.HmacKeys.List(project)
- setClientHeader(call.Header())
if pageToken != "" {
call = call.PageToken(pageToken)
}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
index 47300d7a10f..82ec5db902b 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
@@ -962,7 +962,9 @@ func (c *gRPCClient) Connection() *grpc.ClientConn {
func (c *gRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
}
// Close closes the connection to the API service. The user should invoke this when
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
index b63d664e5e2..9f1c4fb2238 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
+// protoc-gen-go v1.34.1
// protoc v4.25.3
// source: google/storage/v2/storage.proto
diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go
index c3cf41cb718..f37b860085c 100644
--- a/vendor/cloud.google.com/go/storage/internal/version.go
+++ b/vendor/cloud.google.com/go/storage/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.41.0"
+const Version = "1.42.0"
diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go
index ffc49a808d3..b97f8ed7a1d 100644
--- a/vendor/cloud.google.com/go/storage/invoke.go
+++ b/vendor/cloud.google.com/go/storage/invoke.go
@@ -84,7 +84,21 @@ func setInvocationHeaders(ctx context.Context, invocationID string, attempts int
invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts)
xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ")
- ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
+ // TODO: remove this once the respective transport packages merge xGoogHeader.
+ // Also remove gl-go at that time, as it will be repeated.
+ hdrs := callctx.HeadersFromContext(ctx)
+ for _, v := range hdrs[xGoogHeaderKey] {
+ xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ")
+ }
+
+ if hdrs[xGoogHeaderKey] != nil {
+ // Replace the key instead of adding it, if there was anything to merge with.
+ hdrs[xGoogHeaderKey] = []string{xGoogHeader}
+ } else {
+ // TODO: keep this line when removing the above code.
+ ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader)
+ }
+
ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID)
return ctx
}
diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go
index e72ceb78f06..debdb0f52d5 100644
--- a/vendor/cloud.google.com/go/storage/option.go
+++ b/vendor/cloud.google.com/go/storage/option.go
@@ -44,10 +44,14 @@ type storageClientOption interface {
ApplyStorageOpt(*storageConfig)
}
-// WithJSONReads is an option that may be passed to a Storage Client on creation.
-// It sets the client to use the JSON API for object reads. Currently, the
-// default API used for reads is XML.
-// Setting this option is required to use the GenerationNotMatch condition.
+// WithJSONReads is an option that may be passed to [NewClient].
+// It sets the client to use the Cloud Storage JSON API for object
+// reads. Currently, the default API used for reads is XML, but JSON will
+// become the default in a future release.
+//
+// Setting this option is required to use the GenerationNotMatch condition. We
+// also recommend using JSON reads to ensure consistency with other client
+// operations (all of which use JSON by default).
//
// Note that when this option is set, reads will return a zero date for
// [ReaderObjectAttrs].LastModified and may return a different value for
@@ -56,10 +60,11 @@ func WithJSONReads() option.ClientOption {
return &withReadAPI{useJSON: true}
}
-// WithXMLReads is an option that may be passed to a Storage Client on creation.
-// It sets the client to use the XML API for object reads.
+// WithXMLReads is an option that may be passed to [NewClient].
+// It sets the client to use the Cloud Storage XML API for object reads.
//
-// This is the current default.
+// This is the current default, but the default will switch to JSON in a future
+// release.
func WithXMLReads() option.ClientOption {
return &withReadAPI{useJSON: false}
}
diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go
index 0b228a6a76c..6da2432f004 100644
--- a/vendor/cloud.google.com/go/storage/reader.go
+++ b/vendor/cloud.google.com/go/storage/reader.go
@@ -72,6 +72,12 @@ type ReaderObjectAttrs struct {
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
+//
+// By default, reads are made using the Cloud Storage XML API. We recommend
+// using the JSON API instead, which can be done by setting [WithJSONReads]
+// when calling [NewClient]. This ensures consistency with other client
+// operations, which all use JSON. JSON will become the default in a future
+// release.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
}
@@ -86,6 +92,12 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding
// that file will be served back whole, regardless of the requested range as
// Google Cloud Storage dictates.
+//
+// By default, reads are made using the Cloud Storage XML API. We recommend
+// using the JSON API instead, which can be done by setting [WithJSONReads]
+// when calling [NewClient]. This ensures consistency with other client
+// operations, which all use JSON. JSON will become the default in a future
+// release.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
// This span covers the life of the reader. It is closed via the context
// in Reader.Close.
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
index 0c335f38a98..b6316fa668f 100644
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ b/vendor/cloud.google.com/go/storage/storage.go
@@ -117,10 +117,6 @@ type Client struct {
// tc is the transport-agnostic client implemented with either gRPC or HTTP.
tc storageClient
- // useGRPC flags whether the client uses gRPC. This is needed while the
- // integration piece is only partially complete.
- // TODO: remove before merging to main.
- useGRPC bool
}
// NewClient creates a new Google Cloud Storage client using the HTTP transport.
@@ -237,7 +233,7 @@ func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, e
return nil, err
}
- return &Client{tc: tc, useGRPC: true}, nil
+ return &Client{tc: tc}, nil
}
// Close closes the Client.
@@ -975,7 +971,8 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
gen: o.gen,
encryptionKey: o.encryptionKey,
conds: o.conds,
- overrideRetention: o.overrideRetention}, opts...)
+ overrideRetention: o.overrideRetention,
+ }, opts...)
}
// BucketName returns the name of the bucket.
@@ -2356,7 +2353,6 @@ func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChec
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
o := makeStorageOpts(true, c.retry, "")
return c.tc.GetServiceAccount(ctx, projectID, o...)
-
}
// bucketResourceName formats the given project ID and bucketResourceName ID
diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE
deleted file mode 100644
index 37ec93a14fd..00000000000
--- a/vendor/github.com/golang/glog/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/golang/glog/README.md b/vendor/github.com/golang/glog/README.md
deleted file mode 100644
index a4f73883b2d..00000000000
--- a/vendor/github.com/golang/glog/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# glog
-
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/golang/glog)](https://pkg.go.dev/github.com/golang/glog)
-
-Leveled execution logs for Go.
-
-This is an efficient pure Go implementation of leveled logs in the
-manner of the open source C++ package [_glog_](https://github.com/google/glog).
-
-By binding methods to booleans it is possible to use the log package without paying the expense of evaluating the arguments to the log. Through the `-vmodule` flag, the package also provides fine-grained
-control over logging at the file level.
-
-The comment from `glog.go` introduces the ideas:
-
-Package _glog_ implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. It provides the functions Info, Warning, Error, Fatal, plus formatting variants such as Infof. It also provides V-style loggingcontrolled by the `-v` and `-vmodule=file=2` flags.
-
-Basic examples:
-
-```go
-glog.Info("Prepare to repel boarders")
-
-glog.Fatalf("Initialization failed: %s", err)
-```
-
-See the documentation for the V function for an explanation of these examples:
-
-```go
-if glog.V(2) {
- glog.Info("Starting transaction...")
-}
-glog.V(2).Infoln("Processed", nItems, "elements")
-```
-
-The repository contains an open source version of the log package used inside Google. The master copy of the source lives inside Google, not here. The code in this repo is for export only and is not itself under development. Feature requests will be ignored.
-
-Send bug reports to golang-nuts@googlegroups.com.
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go
deleted file mode 100644
index 8c00e737a03..00000000000
--- a/vendor/github.com/golang/glog/glog.go
+++ /dev/null
@@ -1,777 +0,0 @@
-// Go support for leveled logs, analogous to https://github.com/google/glog.
-//
-// Copyright 2023 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
-// It provides functions that have a name matched by regex:
-//
-// (Info|Warning|Error|Fatal)(Context)?(Depth)?(f)?
-//
-// If Context is present, function takes context.Context argument. The
-// context is used to pass through the Trace Context to log sinks that can make use
-// of it.
-// It is recommended to use the context variant of the functions over the non-context
-// variants if a context is available to make sure the Trace Contexts are present
-// in logs.
-//
-// If Depth is present, this function calls log from a different depth in the call stack.
-// This enables a callee to emit logs that use the callsite information of its caller
-// or any other callers in the stack. When depth == 0, the original callee's line
-// information is emitted. When depth > 0, depth frames are skipped in the call stack
-// and the final frame is treated like the original callee to Info.
-//
-// If 'f' is present, function formats according to a format specifier.
-//
-// This package also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
-//
-// Basic examples:
-//
-// glog.Info("Prepare to repel boarders")
-//
-// glog.Fatalf("Initialization failed: %s", err)
-//
-// See the documentation for the V function for an explanation of these examples:
-//
-// if glog.V(2) {
-// glog.Info("Starting transaction...")
-// }
-//
-// glog.V(2).Infoln("Processed", nItems, "elements")
-//
-// Log output is buffered and written periodically using Flush. Programs
-// should call Flush before exiting to guarantee all log output is written.
-//
-// By default, all log statements write to files in a temporary directory.
-// This package provides several flags that modify this behavior.
-// As a result, flag.Parse must be called before any logging is done.
-//
-// -logtostderr=false
-// Logs are written to standard error instead of to files.
-// -alsologtostderr=false
-// Logs are written to standard error as well as to files.
-// -stderrthreshold=ERROR
-// Log events at or above this severity are logged to standard
-// error as well as to files.
-// -log_dir=""
-// Log files will be written to this directory instead of the
-// default temporary directory.
-//
-// Other flags provide aids to debugging.
-//
-// -log_backtrace_at=""
-// A comma-separated list of file and line numbers holding a logging
-// statement, such as
-// -log_backtrace_at=gopherflakes.go:234
-// A stack trace will be written to the Info log whenever execution
-// hits one of these statements. (Unlike with -vmodule, the ".go"
-// must bepresent.)
-// -v=0
-// Enable V-leveled logging at the specified level.
-// -vmodule=""
-// The syntax of the argument is a comma-separated list of pattern=N,
-// where pattern is a literal file name (minus the ".go" suffix) or
-// "glob" pattern and N is a V level. For instance,
-// -vmodule=gopher*=3
-// sets the V level to 3 in all Go files whose names begin with "gopher",
-// and
-// -vmodule=/path/to/glog/glog_test=1
-// sets the V level to 1 in the Go file /path/to/glog/glog_test.go.
-// If a glob pattern contains a slash, it is matched against the full path,
-// and the file name. Otherwise, the pattern is
-// matched only against the file's basename. When both -vmodule and -v
-// are specified, the -vmodule values take precedence for the specified
-// modules.
-package glog
-
-// This file contains the parts of the log package that are shared among all
-// implementations (file, envelope, and appengine).
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- stdLog "log"
- "os"
- "reflect"
- "runtime"
- "runtime/pprof"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/golang/glog/internal/logsink"
- "github.com/golang/glog/internal/stackdump"
-)
-
-var timeNow = time.Now // Stubbed out for testing.
-
-// MaxSize is the maximum size of a log file in bytes.
-var MaxSize uint64 = 1024 * 1024 * 1800
-
-// ErrNoLog is the error we return if no log file has yet been created
-// for the specified log type.
-var ErrNoLog = errors.New("log file not yet created")
-
-// OutputStats tracks the number of output lines and bytes written.
-type OutputStats struct {
- lines int64
- bytes int64
-}
-
-// Lines returns the number of lines written.
-func (s *OutputStats) Lines() int64 {
- return atomic.LoadInt64(&s.lines)
-}
-
-// Bytes returns the number of bytes written.
-func (s *OutputStats) Bytes() int64 {
- return atomic.LoadInt64(&s.bytes)
-}
-
-// Stats tracks the number of lines of output and number of bytes
-// per severity level. Values must be read with atomic.LoadInt64.
-var Stats struct {
- Info, Warning, Error OutputStats
-}
-
-var severityStats = [...]*OutputStats{
- logsink.Info: &Stats.Info,
- logsink.Warning: &Stats.Warning,
- logsink.Error: &Stats.Error,
- logsink.Fatal: nil,
-}
-
-// Level specifies a level of verbosity for V logs. The -v flag is of type
-// Level and should be modified only through the flag.Value interface.
-type Level int32
-
-var metaPool sync.Pool // Pool of *logsink.Meta.
-
-// metaPoolGet returns a *logsink.Meta from metaPool as both an interface and a
-// pointer, allocating a new one if necessary. (Returning the interface value
-// directly avoids an allocation if there was an existing pointer in the pool.)
-func metaPoolGet() (any, *logsink.Meta) {
- if metai := metaPool.Get(); metai != nil {
- return metai, metai.(*logsink.Meta)
- }
- meta := new(logsink.Meta)
- return meta, meta
-}
-
-type stack bool
-
-const (
- noStack = stack(false)
- withStack = stack(true)
-)
-
-func appendBacktrace(depth int, format string, args []any) (string, []any) {
- // Capture a backtrace as a stackdump.Stack (both text and PC slice).
- // Structured log sinks can extract the backtrace in whichever format they
- // prefer (PCs or text), and Text sinks will include it as just another part
- // of the log message.
- //
- // Use depth instead of depth+1 so that the backtrace always includes the
- // log function itself - otherwise the reason for the trace appearing in the
- // log may not be obvious to the reader.
- dump := stackdump.Caller(depth)
-
- // Add an arg and an entry in the format string for the stack dump.
- //
- // Copy the "args" slice to avoid a rare but serious aliasing bug
- // (corrupting the caller's slice if they passed it to a non-Fatal call
- // using "...").
- format = format + "\n\n%v\n"
- args = append(append([]any(nil), args...), dump)
-
- return format, args
-}
-
-// logf acts as ctxlogf, but doesn't expect a context.
-func logf(depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) {
- ctxlogf(nil, depth+1, severity, verbose, stack, format, args...)
-}
-
-// ctxlogf writes a log message for a log function call (or log function wrapper)
-// at the given depth in the current goroutine's stack.
-func ctxlogf(ctx context.Context, depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) {
- now := timeNow()
- _, file, line, ok := runtime.Caller(depth + 1)
- if !ok {
- file = "???"
- line = 1
- }
-
- if stack == withStack || backtraceAt(file, line) {
- format, args = appendBacktrace(depth+1, format, args)
- }
-
- metai, meta := metaPoolGet()
- *meta = logsink.Meta{
- Context: ctx,
- Time: now,
- File: file,
- Line: line,
- Depth: depth + 1,
- Severity: severity,
- Verbose: verbose,
- Thread: int64(pid),
- }
- sinkf(meta, format, args...)
- // Clear pointer fields so they can be garbage collected early.
- meta.Context = nil
- meta.Stack = nil
- metaPool.Put(metai)
-}
-
-func sinkf(meta *logsink.Meta, format string, args ...any) {
- meta.Depth++
- n, err := logsink.Printf(meta, format, args...)
- if stats := severityStats[meta.Severity]; stats != nil {
- atomic.AddInt64(&stats.lines, 1)
- atomic.AddInt64(&stats.bytes, int64(n))
- }
-
- if err != nil {
- logsink.Printf(meta, "glog: exiting because of error: %s", err)
- sinks.file.Flush()
- os.Exit(2)
- }
-}
-
-// CopyStandardLogTo arranges for messages written to the Go "log" package's
-// default logs to also appear in the Google logs for the named and lower
-// severities. Subsequent changes to the standard log's default output location
-// or format may break this behavior.
-//
-// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
-// recognized, CopyStandardLogTo panics.
-func CopyStandardLogTo(name string) {
- sev, err := logsink.ParseSeverity(name)
- if err != nil {
- panic(fmt.Sprintf("log.CopyStandardLogTo(%q): %v", name, err))
- }
- // Set a log format that captures the user's file and line:
- // d.go:23: message
- stdLog.SetFlags(stdLog.Lshortfile)
- stdLog.SetOutput(logBridge(sev))
-}
-
-// NewStandardLogger returns a Logger that writes to the Google logs for the
-// named and lower severities.
-//
-// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
-// recognized, NewStandardLogger panics.
-func NewStandardLogger(name string) *stdLog.Logger {
- sev, err := logsink.ParseSeverity(name)
- if err != nil {
- panic(fmt.Sprintf("log.NewStandardLogger(%q): %v", name, err))
- }
- return stdLog.New(logBridge(sev), "", stdLog.Lshortfile)
-}
-
-// logBridge provides the Write method that enables CopyStandardLogTo to connect
-// Go's standard logs to the logs provided by this package.
-type logBridge logsink.Severity
-
-// Write parses the standard logging line and passes its components to the
-// logger for severity(lb).
-func (lb logBridge) Write(b []byte) (n int, err error) {
- var (
- file = "???"
- line = 1
- text string
- )
- // Split "d.go:23: message" into "d.go", "23", and "message".
- if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
- text = fmt.Sprintf("bad log format: %s", b)
- } else {
- file = string(parts[0])
- text = string(parts[2][1:]) // skip leading space
- line, err = strconv.Atoi(string(parts[1]))
- if err != nil {
- text = fmt.Sprintf("bad line number: %s", b)
- line = 1
- }
- }
-
- // The depth below hard-codes details of how stdlog gets here. The alternative would be to walk
- // up the stack looking for src/log/log.go but that seems like it would be
- // unfortunately slow.
- const stdLogDepth = 4
-
- metai, meta := metaPoolGet()
- *meta = logsink.Meta{
- Time: timeNow(),
- File: file,
- Line: line,
- Depth: stdLogDepth,
- Severity: logsink.Severity(lb),
- Thread: int64(pid),
- }
-
- format := "%s"
- args := []any{text}
- if backtraceAt(file, line) {
- format, args = appendBacktrace(meta.Depth, format, args)
- }
-
- sinkf(meta, format, args...)
- metaPool.Put(metai)
-
- return len(b), nil
-}
-
-// defaultFormat returns a fmt.Printf format specifier that formats its
-// arguments as if they were passed to fmt.Print.
-func defaultFormat(args []any) string {
- n := len(args)
- switch n {
- case 0:
- return ""
- case 1:
- return "%v"
- }
-
- b := make([]byte, 0, n*3-1)
- wasString := true // Suppress leading space.
- for _, arg := range args {
- isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String
- if wasString || isString {
- b = append(b, "%v"...)
- } else {
- b = append(b, " %v"...)
- }
- wasString = isString
- }
- return string(b)
-}
-
-// lnFormat returns a fmt.Printf format specifier that formats its arguments
-// as if they were passed to fmt.Println.
-func lnFormat(args []any) string {
- if len(args) == 0 {
- return "\n"
- }
-
- b := make([]byte, 0, len(args)*3)
- for range args {
- b = append(b, "%v "...)
- }
- b[len(b)-1] = '\n' // Replace the last space with a newline.
- return string(b)
-}
-
-// Verbose is a boolean type that implements Infof (like Printf) etc.
-// See the documentation of V for more information.
-type Verbose bool
-
-// V reports whether verbosity at the call site is at least the requested level.
-// The returned value is a boolean of type Verbose, which implements Info, Infoln
-// and Infof. These methods will write to the Info log if called.
-// Thus, one may write either
-//
-// if glog.V(2) { glog.Info("log this") }
-//
-// or
-//
-// glog.V(2).Info("log this")
-//
-// The second form is shorter but the first is cheaper if logging is off because it does
-// not evaluate its arguments.
-//
-// Whether an individual call to V generates a log record depends on the setting of
-// the -v and --vmodule flags; both are off by default. If the level in the call to
-// V is at most the value of -v, or of -vmodule for the source file containing the
-// call, the V call will log.
-func V(level Level) Verbose {
- return VDepth(1, level)
-}
-
-// VDepth acts as V but uses depth to determine which call frame to check vmodule for.
-// VDepth(0, level) is the same as V(level).
-func VDepth(depth int, level Level) Verbose {
- return Verbose(verboseEnabled(depth+1, level))
-}
-
-// Info is equivalent to the global Info function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Info(args ...any) {
- v.InfoDepth(1, args...)
-}
-
-// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfoDepth(depth int, args ...any) {
- if v {
- logf(depth+1, logsink.Info, true, noStack, defaultFormat(args), args...)
- }
-}
-
-// InfoDepthf is equivalent to the global InfoDepthf function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfoDepthf(depth int, format string, args ...any) {
- if v {
- logf(depth+1, logsink.Info, true, noStack, format, args...)
- }
-}
-
-// Infoln is equivalent to the global Infoln function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Infoln(args ...any) {
- if v {
- logf(1, logsink.Info, true, noStack, lnFormat(args), args...)
- }
-}
-
-// Infof is equivalent to the global Infof function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Infof(format string, args ...any) {
- if v {
- logf(1, logsink.Info, true, noStack, format, args...)
- }
-}
-
-// InfoContext is equivalent to the global InfoContext function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfoContext(ctx context.Context, args ...any) {
- v.InfoContextDepth(ctx, 1, args...)
-}
-
-// InfoContextf is equivalent to the global InfoContextf function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfoContextf(ctx context.Context, format string, args ...any) {
- if v {
- ctxlogf(ctx, 1, logsink.Info, true, noStack, format, args...)
- }
-}
-
-// InfoContextDepth is equivalent to the global InfoContextDepth function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfoContextDepth(ctx context.Context, depth int, args ...any) {
- if v {
- ctxlogf(ctx, depth+1, logsink.Info, true, noStack, defaultFormat(args), args...)
- }
-}
-
-// InfoContextDepthf is equivalent to the global InfoContextDepthf function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) InfoContextDepthf(ctx context.Context, depth int, format string, args ...any) {
- if v {
- ctxlogf(ctx, depth+1, logsink.Info, true, noStack, format, args...)
- }
-}
-
-// Info logs to the INFO log.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Info(args ...any) {
- InfoDepth(1, args...)
-}
-
-// InfoDepth calls Info from a different depth in the call stack.
-// This enables a callee to emit logs that use the callsite information of its caller
-// or any other callers in the stack. When depth == 0, the original callee's line
-// information is emitted. When depth > 0, depth frames are skipped in the call stack
-// and the final frame is treated like the original callee to Info.
-func InfoDepth(depth int, args ...any) {
- logf(depth+1, logsink.Info, false, noStack, defaultFormat(args), args...)
-}
-
-// InfoDepthf acts as InfoDepth but with format string.
-func InfoDepthf(depth int, format string, args ...any) {
- logf(depth+1, logsink.Info, false, noStack, format, args...)
-}
-
-// Infoln logs to the INFO log.
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Infoln(args ...any) {
- logf(1, logsink.Info, false, noStack, lnFormat(args), args...)
-}
-
-// Infof logs to the INFO log.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Infof(format string, args ...any) {
- logf(1, logsink.Info, false, noStack, format, args...)
-}
-
-// InfoContext is like [Info], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func InfoContext(ctx context.Context, args ...any) {
- InfoContextDepth(ctx, 1, args...)
-}
-
-// InfoContextf is like [Infof], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func InfoContextf(ctx context.Context, format string, args ...any) {
- ctxlogf(ctx, 1, logsink.Info, false, noStack, format, args...)
-}
-
-// InfoContextDepth is like [InfoDepth], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func InfoContextDepth(ctx context.Context, depth int, args ...any) {
- ctxlogf(ctx, depth+1, logsink.Info, false, noStack, defaultFormat(args), args...)
-}
-
-// InfoContextDepthf is like [InfoDepthf], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func InfoContextDepthf(ctx context.Context, depth int, format string, args ...any) {
- ctxlogf(ctx, depth+1, logsink.Info, false, noStack, format, args...)
-}
-
-// Warning logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Warning(args ...any) {
- WarningDepth(1, args...)
-}
-
-// WarningDepth acts as Warning but uses depth to determine which call frame to log.
-// WarningDepth(0, "msg") is the same as Warning("msg").
-func WarningDepth(depth int, args ...any) {
- logf(depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...)
-}
-
-// WarningDepthf acts as Warningf but uses depth to determine which call frame to log.
-// WarningDepthf(0, "msg") is the same as Warningf("msg").
-func WarningDepthf(depth int, format string, args ...any) {
- logf(depth+1, logsink.Warning, false, noStack, format, args...)
-}
-
-// Warningln logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Warningln(args ...any) {
- logf(1, logsink.Warning, false, noStack, lnFormat(args), args...)
-}
-
-// Warningf logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Warningf(format string, args ...any) {
- logf(1, logsink.Warning, false, noStack, format, args...)
-}
-
-// WarningContext is like [Warning], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func WarningContext(ctx context.Context, args ...any) {
- WarningContextDepth(ctx, 1, args...)
-}
-
-// WarningContextf is like [Warningf], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func WarningContextf(ctx context.Context, format string, args ...any) {
- ctxlogf(ctx, 1, logsink.Warning, false, noStack, format, args...)
-}
-
-// WarningContextDepth is like [WarningDepth], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func WarningContextDepth(ctx context.Context, depth int, args ...any) {
- ctxlogf(ctx, depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...)
-}
-
-// WarningContextDepthf is like [WarningDepthf], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func WarningContextDepthf(ctx context.Context, depth int, format string, args ...any) {
- ctxlogf(ctx, depth+1, logsink.Warning, false, noStack, format, args...)
-}
-
-// Error logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Error(args ...any) {
- ErrorDepth(1, args...)
-}
-
-// ErrorDepth acts as Error but uses depth to determine which call frame to log.
-// ErrorDepth(0, "msg") is the same as Error("msg").
-func ErrorDepth(depth int, args ...any) {
- logf(depth+1, logsink.Error, false, noStack, defaultFormat(args), args...)
-}
-
-// ErrorDepthf acts as Errorf but uses depth to determine which call frame to log.
-// ErrorDepthf(0, "msg") is the same as Errorf("msg").
-func ErrorDepthf(depth int, format string, args ...any) {
- logf(depth+1, logsink.Error, false, noStack, format, args...)
-}
-
-// Errorln logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Errorln(args ...any) {
- logf(1, logsink.Error, false, noStack, lnFormat(args), args...)
-}
-
-// Errorf logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Errorf(format string, args ...any) {
- logf(1, logsink.Error, false, noStack, format, args...)
-}
-
-// ErrorContext is like [Error], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func ErrorContext(ctx context.Context, args ...any) {
- ErrorContextDepth(ctx, 1, args...)
-}
-
-// ErrorContextf is like [Errorf], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func ErrorContextf(ctx context.Context, format string, args ...any) {
- ctxlogf(ctx, 1, logsink.Error, false, noStack, format, args...)
-}
-
-// ErrorContextDepth is like [ErrorDepth], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func ErrorContextDepth(ctx context.Context, depth int, args ...any) {
- ctxlogf(ctx, depth+1, logsink.Error, false, noStack, defaultFormat(args), args...)
-}
-
-// ErrorContextDepthf is like [ErrorDepthf], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func ErrorContextDepthf(ctx context.Context, depth int, format string, args ...any) {
- ctxlogf(ctx, depth+1, logsink.Error, false, noStack, format, args...)
-}
-
-func ctxfatalf(ctx context.Context, depth int, format string, args ...any) {
- ctxlogf(ctx, depth+1, logsink.Fatal, false, withStack, format, args...)
- sinks.file.Flush()
-
- err := abortProcess() // Should not return.
-
- // Failed to abort the process using signals. Dump a stack trace and exit.
- Errorf("abortProcess returned unexpectedly: %v", err)
- sinks.file.Flush()
- pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
- os.Exit(2) // Exit with the same code as the default SIGABRT handler.
-}
-
-func fatalf(depth int, format string, args ...any) {
- ctxfatalf(nil, depth+1, format, args...)
-}
-
-// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(2).
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Fatal(args ...any) {
- FatalDepth(1, args...)
-}
-
-// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
-// FatalDepth(0, "msg") is the same as Fatal("msg").
-func FatalDepth(depth int, args ...any) {
- fatalf(depth+1, defaultFormat(args), args...)
-}
-
-// FatalDepthf acts as Fatalf but uses depth to determine which call frame to log.
-// FatalDepthf(0, "msg") is the same as Fatalf("msg").
-func FatalDepthf(depth int, format string, args ...any) {
- fatalf(depth+1, format, args...)
-}
-
-// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(2).
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Fatalln(args ...any) {
- fatalf(1, lnFormat(args), args...)
-}
-
-// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(2).
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Fatalf(format string, args ...any) {
- fatalf(1, format, args...)
-}
-
-// FatalContext is like [Fatal], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func FatalContext(ctx context.Context, args ...any) {
- FatalContextDepth(ctx, 1, args...)
-}
-
-// FatalContextf is like [Fatalf], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func FatalContextf(ctx context.Context, format string, args ...any) {
- ctxfatalf(ctx, 1, format, args...)
-}
-
-// FatalContextDepth is like [FatalDepth], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func FatalContextDepth(ctx context.Context, depth int, args ...any) {
- ctxfatalf(ctx, depth+1, defaultFormat(args), args...)
-}
-
-// FatalContextDepthf is like [FatalDepthf], but with an extra [context.Context] parameter.
-func FatalContextDepthf(ctx context.Context, depth int, format string, args ...any) {
- ctxfatalf(ctx, depth+1, format, args...)
-}
-
-func ctxexitf(ctx context.Context, depth int, format string, args ...any) {
- ctxlogf(ctx, depth+1, logsink.Fatal, false, noStack, format, args...)
- sinks.file.Flush()
- os.Exit(1)
-}
-
-func exitf(depth int, format string, args ...any) {
- ctxexitf(nil, depth+1, format, args...)
-}
-
-// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Exit(args ...any) {
- ExitDepth(1, args...)
-}
-
-// ExitDepth acts as Exit but uses depth to determine which call frame to log.
-// ExitDepth(0, "msg") is the same as Exit("msg").
-func ExitDepth(depth int, args ...any) {
- exitf(depth+1, defaultFormat(args), args...)
-}
-
-// ExitDepthf acts as Exitf but uses depth to determine which call frame to log.
-// ExitDepthf(0, "msg") is the same as Exitf("msg").
-func ExitDepthf(depth int, format string, args ...any) {
- exitf(depth+1, format, args...)
-}
-
-// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-func Exitln(args ...any) {
- exitf(1, lnFormat(args), args...)
-}
-
-// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Exitf(format string, args ...any) {
- exitf(1, format, args...)
-}
-
-// ExitContext is like [Exit], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func ExitContext(ctx context.Context, args ...any) {
- ExitContextDepth(ctx, 1, args...)
-}
-
-// ExitContextf is like [Exitf], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func ExitContextf(ctx context.Context, format string, args ...any) {
- ctxexitf(ctx, 1, format, args...)
-}
-
-// ExitContextDepth is like [ExitDepth], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func ExitContextDepth(ctx context.Context, depth int, args ...any) {
- ctxexitf(ctx, depth+1, defaultFormat(args), args...)
-}
-
-// ExitContextDepthf is like [ExitDepthf], but with an extra [context.Context] parameter. The
-// context is used to pass the Trace Context to log sinks.
-func ExitContextDepthf(ctx context.Context, depth int, format string, args ...any) {
- ctxexitf(ctx, depth+1, format, args...)
-}
diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go
deleted file mode 100644
index e7d125c5ae4..00000000000
--- a/vendor/github.com/golang/glog/glog_file.go
+++ /dev/null
@@ -1,413 +0,0 @@
-// Go support for leveled logs, analogous to https://github.com/google/glog.
-//
-// Copyright 2023 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// File I/O for logs.
-
-package glog
-
-import (
- "bufio"
- "bytes"
- "errors"
- "flag"
- "fmt"
- "io"
- "os"
- "os/user"
- "path/filepath"
- "runtime"
- "strings"
- "sync"
- "time"
-
- "github.com/golang/glog/internal/logsink"
-)
-
-// logDirs lists the candidate directories for new log files.
-var logDirs []string
-
-var (
- // If non-empty, overrides the choice of directory in which to write logs.
- // See createLogDirs for the full list of possible destinations.
- logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
- logLink = flag.String("log_link", "", "If non-empty, add symbolic links in this directory to the log files")
- logBufLevel = flag.Int("logbuflevel", int(logsink.Info), "Buffer log messages logged at this level or lower"+
- " (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.")
-)
-
-func createLogDirs() {
- if *logDir != "" {
- logDirs = append(logDirs, *logDir)
- }
- logDirs = append(logDirs, os.TempDir())
-}
-
-var (
- pid = os.Getpid()
- program = filepath.Base(os.Args[0])
- host = "unknownhost"
- userName = "unknownuser"
-)
-
-func init() {
- h, err := os.Hostname()
- if err == nil {
- host = shortHostname(h)
- }
-
- current, err := user.Current()
- if err == nil {
- userName = current.Username
- }
- // Sanitize userName since it is used to construct file paths.
- userName = strings.Map(func(r rune) rune {
- switch {
- case r >= 'a' && r <= 'z':
- case r >= 'A' && r <= 'Z':
- case r >= '0' && r <= '9':
- default:
- return '_'
- }
- return r
- }, userName)
-}
-
-// shortHostname returns its argument, truncating at the first period.
-// For instance, given "www.google.com" it returns "www".
-func shortHostname(hostname string) string {
- if i := strings.Index(hostname, "."); i >= 0 {
- return hostname[:i]
- }
- return hostname
-}
-
-// logName returns a new log file name containing tag, with start time t, and
-// the name for the symlink for tag.
-func logName(tag string, t time.Time) (name, link string) {
- name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
- program,
- host,
- userName,
- tag,
- t.Year(),
- t.Month(),
- t.Day(),
- t.Hour(),
- t.Minute(),
- t.Second(),
- pid)
- return name, program + "." + tag
-}
-
-var onceLogDirs sync.Once
-
-// create creates a new log file and returns the file and its filename, which
-// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
-// successfully, create also attempts to update the symlink for that tag, ignoring
-// errors.
-func create(tag string, t time.Time) (f *os.File, filename string, err error) {
- onceLogDirs.Do(createLogDirs)
- if len(logDirs) == 0 {
- return nil, "", errors.New("log: no log dirs")
- }
- name, link := logName(tag, t)
- var lastErr error
- for _, dir := range logDirs {
- fname := filepath.Join(dir, name)
- f, err := os.Create(fname)
- if err == nil {
- symlink := filepath.Join(dir, link)
- os.Remove(symlink) // ignore err
- os.Symlink(name, symlink) // ignore err
- if *logLink != "" {
- lsymlink := filepath.Join(*logLink, link)
- os.Remove(lsymlink) // ignore err
- os.Symlink(fname, lsymlink) // ignore err
- }
- return f, fname, nil
- }
- lastErr = err
- }
- return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
-}
-
-// flushSyncWriter is the interface satisfied by logging destinations.
-type flushSyncWriter interface {
- Flush() error
- Sync() error
- io.Writer
- filenames() []string
-}
-
-var sinks struct {
- stderr stderrSink
- file fileSink
-}
-
-func init() {
- // Register stderr first: that way if we crash during file-writing at least
- // the log will have gone somewhere.
- logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file)
-
- sinks.file.flushChan = make(chan logsink.Severity, 1)
- go sinks.file.flushDaemon()
-}
-
-// stderrSink is a logsink.Text that writes log entries to stderr
-// if they meet certain conditions.
-type stderrSink struct {
- mu sync.Mutex
- w io.Writer // if nil Emit uses os.Stderr directly
-}
-
-// Enabled implements logsink.Text.Enabled. It returns true if any of the
-// various stderr flags are enabled for logs of the given severity, if the log
-// message is from the standard "log" package, or if google.Init has not yet run
-// (and hence file logging is not yet initialized).
-func (s *stderrSink) Enabled(m *logsink.Meta) bool {
- return toStderr || alsoToStderr || m.Severity >= stderrThreshold.get()
-}
-
-// Emit implements logsink.Text.Emit.
-func (s *stderrSink) Emit(m *logsink.Meta, data []byte) (n int, err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- w := s.w
- if w == nil {
- w = os.Stderr
- }
- dn, err := w.Write(data)
- n += dn
- return n, err
-}
-
-// severityWriters is an array of flushSyncWriter with a value for each
-// logsink.Severity.
-type severityWriters [4]flushSyncWriter
-
-// fileSink is a logsink.Text that prints to a set of Google log files.
-type fileSink struct {
- mu sync.Mutex
- // file holds writer for each of the log types.
- file severityWriters
- flushChan chan logsink.Severity
-}
-
-// Enabled implements logsink.Text.Enabled. It returns true if google.Init
-// has run and both --disable_log_to_disk and --logtostderr are false.
-func (s *fileSink) Enabled(m *logsink.Meta) bool {
- return !toStderr
-}
-
-// Emit implements logsink.Text.Emit
-func (s *fileSink) Emit(m *logsink.Meta, data []byte) (n int, err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if err = s.createMissingFiles(m.Severity); err != nil {
- return 0, err
- }
- for sev := m.Severity; sev >= logsink.Info; sev-- {
- if _, fErr := s.file[sev].Write(data); fErr != nil && err == nil {
- err = fErr // Take the first error.
- }
- }
- n = len(data)
- if int(m.Severity) > *logBufLevel {
- select {
- case s.flushChan <- m.Severity:
- default:
- }
- }
-
- return n, err
-}
-
-// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
-// file's Sync method and providing a wrapper for the Write method that provides log
-// file rotation. There are conflicting methods, so the file cannot be embedded.
-// s.mu is held for all its methods.
-type syncBuffer struct {
- sink *fileSink
- *bufio.Writer
- file *os.File
- names []string
- sev logsink.Severity
- nbytes uint64 // The number of bytes written to this file
-}
-
-func (sb *syncBuffer) Sync() error {
- return sb.file.Sync()
-}
-
-func (sb *syncBuffer) Write(p []byte) (n int, err error) {
- if sb.nbytes+uint64(len(p)) >= MaxSize {
- if err := sb.rotateFile(time.Now()); err != nil {
- return 0, err
- }
- }
- n, err = sb.Writer.Write(p)
- sb.nbytes += uint64(n)
- return n, err
-}
-
-func (sb *syncBuffer) filenames() []string {
- return sb.names
-}
-
-const footer = "\nCONTINUED IN NEXT FILE\n"
-
-// rotateFile closes the syncBuffer's file and starts a new one.
-func (sb *syncBuffer) rotateFile(now time.Time) error {
- var err error
- pn := ""
- file, name, err := create(sb.sev.String(), now)
-
- if sb.file != nil {
- // The current log file becomes the previous log at the end of
- // this block, so save its name for use in the header of the next
- // file.
- pn = sb.file.Name()
- sb.Flush()
- // If there's an existing file, write a footer with the name of
- // the next file in the chain, followed by the constant string
- // \nCONTINUED IN NEXT FILE\n to make continuation detection simple.
- sb.file.Write([]byte("Next log: "))
- sb.file.Write([]byte(name))
- sb.file.Write([]byte(footer))
- sb.file.Close()
- }
-
- sb.file = file
- sb.names = append(sb.names, name)
- sb.nbytes = 0
- if err != nil {
- return err
- }
-
- sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
-
- // Write header.
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
- fmt.Fprintf(&buf, "Running on machine: %s\n", host)
- fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
- fmt.Fprintf(&buf, "Previous log: %s\n", pn)
- fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
- n, err := sb.file.Write(buf.Bytes())
- sb.nbytes += uint64(n)
- return err
-}
-
-// bufferSize sizes the buffer associated with each log file. It's large
-// so that log records can accumulate without the logging thread blocking
-// on disk I/O. The flushDaemon will block instead.
-const bufferSize = 256 * 1024
-
-// createMissingFiles creates all the log files for severity from infoLog up to
-// upTo that have not already been created.
-// s.mu is held.
-func (s *fileSink) createMissingFiles(upTo logsink.Severity) error {
- if s.file[upTo] != nil {
- return nil
- }
- now := time.Now()
- // Files are created in increasing severity order, so we can be assured that
- // if a high severity logfile exists, then so do all of lower severity.
- for sev := logsink.Info; sev <= upTo; sev++ {
- if s.file[sev] != nil {
- continue
- }
- sb := &syncBuffer{
- sink: s,
- sev: sev,
- }
- if err := sb.rotateFile(now); err != nil {
- return err
- }
- s.file[sev] = sb
- }
- return nil
-}
-
-// flushDaemon periodically flushes the log file buffers.
-func (s *fileSink) flushDaemon() {
- tick := time.NewTicker(30 * time.Second)
- defer tick.Stop()
- for {
- select {
- case <-tick.C:
- s.Flush()
- case sev := <-s.flushChan:
- s.flush(sev)
- }
- }
-}
-
-// Flush flushes all pending log I/O.
-func Flush() {
- sinks.file.Flush()
-}
-
-// Flush flushes all the logs and attempts to "sync" their data to disk.
-func (s *fileSink) Flush() error {
- return s.flush(logsink.Info)
-}
-
-// flush flushes all logs of severity threshold or greater.
-func (s *fileSink) flush(threshold logsink.Severity) error {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- var firstErr error
- updateErr := func(err error) {
- if err != nil && firstErr == nil {
- firstErr = err
- }
- }
-
- // Flush from fatal down, in case there's trouble flushing.
- for sev := logsink.Fatal; sev >= threshold; sev-- {
- file := s.file[sev]
- if file != nil {
- updateErr(file.Flush())
- updateErr(file.Sync())
- }
- }
-
- return firstErr
-}
-
-// Names returns the names of the log files holding the FATAL, ERROR,
-// WARNING, or INFO logs. Returns ErrNoLog if the log for the given
-// level doesn't exist (e.g. because no messages of that level have been
-// written). This may return multiple names if the log type requested
-// has rolled over.
-func Names(s string) ([]string, error) {
- severity, err := logsink.ParseSeverity(s)
- if err != nil {
- return nil, err
- }
-
- sinks.file.mu.Lock()
- defer sinks.file.mu.Unlock()
- f := sinks.file.file[severity]
- if f == nil {
- return nil, ErrNoLog
- }
-
- return f.filenames(), nil
-}
diff --git a/vendor/github.com/golang/glog/glog_file_linux.go b/vendor/github.com/golang/glog/glog_file_linux.go
deleted file mode 100644
index d795092d03a..00000000000
--- a/vendor/github.com/golang/glog/glog_file_linux.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Go support for leveled logs, analogous to https://github.com/google/glog.
-//
-// Copyright 2023 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-
-package glog
-
-import (
- "errors"
- "runtime"
- "syscall"
-)
-
-// abortProcess attempts to kill the current process in a way that will dump the
-// currently-running goroutines someplace useful (like stderr).
-//
-// It does this by sending SIGABRT to the current thread.
-//
-// If successful, abortProcess does not return.
-func abortProcess() error {
- runtime.LockOSThread()
- if err := syscall.Tgkill(syscall.Getpid(), syscall.Gettid(), syscall.SIGABRT); err != nil {
- return err
- }
- return errors.New("log: killed current thread with SIGABRT, but still running")
-}
diff --git a/vendor/github.com/golang/glog/glog_file_other.go b/vendor/github.com/golang/glog/glog_file_other.go
deleted file mode 100644
index 9540f14fc08..00000000000
--- a/vendor/github.com/golang/glog/glog_file_other.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Go support for leveled logs, analogous to https://github.com/google/glog.
-//
-// Copyright 2023 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !(unix || windows)
-
-package glog
-
-import (
- "fmt"
- "runtime"
-)
-
-// abortProcess returns an error on platforms that presumably don't support signals.
-func abortProcess() error {
- return fmt.Errorf("not sending SIGABRT (%s/%s does not support signals), falling back", runtime.GOOS, runtime.GOARCH)
-
-}
diff --git a/vendor/github.com/golang/glog/glog_file_posix.go b/vendor/github.com/golang/glog/glog_file_posix.go
deleted file mode 100644
index c27c7c0e4ab..00000000000
--- a/vendor/github.com/golang/glog/glog_file_posix.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Go support for leveled logs, analogous to https://github.com/google/glog.
-//
-// Copyright 2023 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build (unix || windows) && !linux
-
-package glog
-
-import (
- "os"
- "syscall"
- "time"
-)
-
-// abortProcess attempts to kill the current process in a way that will dump the
-// currently-running goroutines someplace useful (like stderr).
-//
-// It does this by sending SIGABRT to the current process. Unfortunately, the
-// signal may or may not be delivered to the current thread; in order to do that
-// portably, we would need to add a cgo dependency and call pthread_kill.
-//
-// If successful, abortProcess does not return.
-func abortProcess() error {
- p, err := os.FindProcess(os.Getpid())
- if err != nil {
- return err
- }
- if err := p.Signal(syscall.SIGABRT); err != nil {
- return err
- }
-
- // Sent the signal. Now we wait for it to arrive and any SIGABRT handlers to
- // run (and eventually terminate the process themselves).
- //
- // We could just "select{}" here, but there's an outside chance that would
- // trigger the runtime's deadlock detector if there happen not to be any
- // background goroutines running. So we'll sleep a while first to give
- // the signal some time.
- time.Sleep(10 * time.Second)
- select {}
-}
diff --git a/vendor/github.com/golang/glog/glog_flags.go b/vendor/github.com/golang/glog/glog_flags.go
deleted file mode 100644
index fa4371afd32..00000000000
--- a/vendor/github.com/golang/glog/glog_flags.go
+++ /dev/null
@@ -1,398 +0,0 @@
-// Go support for leveled logs, analogous to https://github.com/google/glog.
-//
-// Copyright 2023 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package glog
-
-import (
- "bytes"
- "errors"
- "flag"
- "fmt"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
-
- "github.com/golang/glog/internal/logsink"
-)
-
-// modulePat contains a filter for the -vmodule flag.
-// It holds a verbosity level and a file pattern to match.
-type modulePat struct {
- pattern string
- literal bool // The pattern is a literal string
- full bool // The pattern wants to match the full path
- level Level
-}
-
-// match reports whether the file matches the pattern. It uses a string
-// comparison if the pattern contains no metacharacters.
-func (m *modulePat) match(full, file string) bool {
- if m.literal {
- if m.full {
- return full == m.pattern
- }
- return file == m.pattern
- }
- if m.full {
- match, _ := filepath.Match(m.pattern, full)
- return match
- }
- match, _ := filepath.Match(m.pattern, file)
- return match
-}
-
-// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
-// that require filepath.Match to be called to match the pattern.
-func isLiteral(pattern string) bool {
- return !strings.ContainsAny(pattern, `\*?[]`)
-}
-
-// isFull reports whether the pattern matches the full file path, that is,
-// whether it contains /.
-func isFull(pattern string) bool {
- return strings.ContainsRune(pattern, '/')
-}
-
-// verboseFlags represents the setting of the -v and -vmodule flags.
-type verboseFlags struct {
- // moduleLevelCache is a sync.Map storing the -vmodule Level for each V()
- // call site, identified by PC. If there is no matching -vmodule filter,
- // the cached value is exactly v. moduleLevelCache is replaced with a new
- // Map whenever the -vmodule or -v flag changes state.
- moduleLevelCache atomic.Value
-
- // mu guards all fields below.
- mu sync.Mutex
-
- // v stores the value of the -v flag. It may be read safely using
- // sync.LoadInt32, but is only modified under mu.
- v Level
-
- // module stores the parsed -vmodule flag.
- module []modulePat
-
- // moduleLength caches len(module). If greater than zero, it
- // means vmodule is enabled. It may be read safely using sync.LoadInt32, but
- // is only modified under mu.
- moduleLength int32
-}
-
-// NOTE: For compatibility with the open-sourced v1 version of this
-// package (github.com/golang/glog) we need to retain that flag.Level
-// implements the flag.Value interface. See also go/log-vs-glog.
-
-// String is part of the flag.Value interface.
-func (l *Level) String() string {
- return strconv.FormatInt(int64(l.Get().(Level)), 10)
-}
-
-// Get is part of the flag.Value interface.
-func (l *Level) Get() any {
- if l == &vflags.v {
- // l is the value registered for the -v flag.
- return Level(atomic.LoadInt32((*int32)(l)))
- }
- return *l
-}
-
-// Set is part of the flag.Value interface.
-func (l *Level) Set(value string) error {
- v, err := strconv.Atoi(value)
- if err != nil {
- return err
- }
- if l == &vflags.v {
- // l is the value registered for the -v flag.
- vflags.mu.Lock()
- defer vflags.mu.Unlock()
- vflags.moduleLevelCache.Store(&sync.Map{})
- atomic.StoreInt32((*int32)(l), int32(v))
- return nil
- }
- *l = Level(v)
- return nil
-}
-
-// vModuleFlag is the flag.Value for the --vmodule flag.
-type vModuleFlag struct{ *verboseFlags }
-
-func (f vModuleFlag) String() string {
- // Do not panic on the zero value.
- // https://groups.google.com/g/golang-nuts/c/Atlr8uAjn6U/m/iId17Td5BQAJ.
- if f.verboseFlags == nil {
- return ""
- }
- f.mu.Lock()
- defer f.mu.Unlock()
-
- var b bytes.Buffer
- for i, f := range f.module {
- if i > 0 {
- b.WriteRune(',')
- }
- fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
- }
- return b.String()
-}
-
-// Get returns nil for this flag type since the struct is not exported.
-func (f vModuleFlag) Get() any { return nil }
-
-var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
-
-// Syntax: -vmodule=recordio=2,foo/bar/baz=1,gfs*=3
-func (f vModuleFlag) Set(value string) error {
- var filter []modulePat
- for _, pat := range strings.Split(value, ",") {
- if len(pat) == 0 {
- // Empty strings such as from a trailing comma can be ignored.
- continue
- }
- patLev := strings.Split(pat, "=")
- if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
- return errVmoduleSyntax
- }
- pattern := patLev[0]
- v, err := strconv.Atoi(patLev[1])
- if err != nil {
- return errors.New("syntax error: expect comma-separated list of filename=N")
- }
- // TODO: check syntax of filter?
- filter = append(filter, modulePat{pattern, isLiteral(pattern), isFull(pattern), Level(v)})
- }
-
- f.mu.Lock()
- defer f.mu.Unlock()
- f.module = filter
- atomic.StoreInt32((*int32)(&f.moduleLength), int32(len(f.module)))
- f.moduleLevelCache.Store(&sync.Map{})
- return nil
-}
-
-func (f *verboseFlags) levelForPC(pc uintptr) Level {
- if level, ok := f.moduleLevelCache.Load().(*sync.Map).Load(pc); ok {
- return level.(Level)
- }
-
- f.mu.Lock()
- defer f.mu.Unlock()
- level := Level(f.v)
- fn := runtime.FuncForPC(pc)
- file, _ := fn.FileLine(pc)
- // The file is something like /a/b/c/d.go. We want just the d for
- // regular matches, /a/b/c/d for full matches.
- file = strings.TrimSuffix(file, ".go")
- full := file
- if slash := strings.LastIndex(file, "/"); slash >= 0 {
- file = file[slash+1:]
- }
- for _, filter := range f.module {
- if filter.match(full, file) {
- level = filter.level
- break // Use the first matching level.
- }
- }
- f.moduleLevelCache.Load().(*sync.Map).Store(pc, level)
- return level
-}
-
-func (f *verboseFlags) enabled(callerDepth int, level Level) bool {
- if atomic.LoadInt32(&f.moduleLength) == 0 {
- // No vmodule values specified, so compare against v level.
- return Level(atomic.LoadInt32((*int32)(&f.v))) >= level
- }
-
- pcs := [1]uintptr{}
- if runtime.Callers(callerDepth+2, pcs[:]) < 1 {
- return false
- }
- frame, _ := runtime.CallersFrames(pcs[:]).Next()
- return f.levelForPC(frame.Entry) >= level
-}
-
-// traceLocation represents an entry in the -log_backtrace_at flag.
-type traceLocation struct {
- file string
- line int
-}
-
-var errTraceSyntax = errors.New("syntax error: expect file.go:234")
-
-func parseTraceLocation(value string) (traceLocation, error) {
- fields := strings.Split(value, ":")
- if len(fields) != 2 {
- return traceLocation{}, errTraceSyntax
- }
- file, lineStr := fields[0], fields[1]
- if !strings.Contains(file, ".") {
- return traceLocation{}, errTraceSyntax
- }
- line, err := strconv.Atoi(lineStr)
- if err != nil {
- return traceLocation{}, errTraceSyntax
- }
- if line < 0 {
- return traceLocation{}, errors.New("negative value for line")
- }
- return traceLocation{file, line}, nil
-}
-
-// match reports whether the specified file and line matches the trace location.
-// The argument file name is the full path, not the basename specified in the flag.
-func (t traceLocation) match(file string, line int) bool {
- if t.line != line {
- return false
- }
- if i := strings.LastIndex(file, "/"); i >= 0 {
- file = file[i+1:]
- }
- return t.file == file
-}
-
-func (t traceLocation) String() string {
- return fmt.Sprintf("%s:%d", t.file, t.line)
-}
-
-// traceLocations represents the -log_backtrace_at flag.
-// Syntax: -log_backtrace_at=recordio.go:234,sstable.go:456
-// Note that unlike vmodule the file extension is included here.
-type traceLocations struct {
- mu sync.Mutex
- locsLen int32 // Safe for atomic read without mu.
- locs []traceLocation
-}
-
-func (t *traceLocations) String() string {
- t.mu.Lock()
- defer t.mu.Unlock()
-
- var buf bytes.Buffer
- for i, tl := range t.locs {
- if i > 0 {
- buf.WriteString(",")
- }
- buf.WriteString(tl.String())
- }
- return buf.String()
-}
-
-// Get always returns nil for this flag type since the struct is not exported
-func (t *traceLocations) Get() any { return nil }
-
-func (t *traceLocations) Set(value string) error {
- var locs []traceLocation
- for _, s := range strings.Split(value, ",") {
- if s == "" {
- continue
- }
- loc, err := parseTraceLocation(s)
- if err != nil {
- return err
- }
- locs = append(locs, loc)
- }
-
- t.mu.Lock()
- defer t.mu.Unlock()
- atomic.StoreInt32(&t.locsLen, int32(len(locs)))
- t.locs = locs
- return nil
-}
-
-func (t *traceLocations) match(file string, line int) bool {
- if atomic.LoadInt32(&t.locsLen) == 0 {
- return false
- }
-
- t.mu.Lock()
- defer t.mu.Unlock()
- for _, tl := range t.locs {
- if tl.match(file, line) {
- return true
- }
- }
- return false
-}
-
-// severityFlag is an atomic flag.Value implementation for logsink.Severity.
-type severityFlag int32
-
-func (s *severityFlag) get() logsink.Severity {
- return logsink.Severity(atomic.LoadInt32((*int32)(s)))
-}
-func (s *severityFlag) String() string { return strconv.FormatInt(int64(*s), 10) }
-func (s *severityFlag) Get() any { return s.get() }
-func (s *severityFlag) Set(value string) error {
- threshold, err := logsink.ParseSeverity(value)
- if err != nil {
- // Not a severity name. Try a raw number.
- v, err := strconv.Atoi(value)
- if err != nil {
- return err
- }
- threshold = logsink.Severity(v)
- if threshold < logsink.Info || threshold > logsink.Fatal {
- return fmt.Errorf("Severity %d out of range (min %d, max %d).", v, logsink.Info, logsink.Fatal)
- }
- }
- atomic.StoreInt32((*int32)(s), int32(threshold))
- return nil
-}
-
-var (
- vflags verboseFlags // The -v and -vmodule flags.
-
- logBacktraceAt traceLocations // The -log_backtrace_at flag.
-
- // Boolean flags. Not handled atomically because the flag.Value interface
- // does not let us avoid the =true, and that shorthand is necessary for
- // compatibility. TODO: does this matter enough to fix? Seems unlikely.
- toStderr bool // The -logtostderr flag.
- alsoToStderr bool // The -alsologtostderr flag.
-
- stderrThreshold severityFlag // The -stderrthreshold flag.
-)
-
-// verboseEnabled returns whether the caller at the given depth should emit
-// verbose logs at the given level, with depth 0 identifying the caller of
-// verboseEnabled.
-func verboseEnabled(callerDepth int, level Level) bool {
- return vflags.enabled(callerDepth+1, level)
-}
-
-// backtraceAt returns whether the logging call at the given function and line
-// should also emit a backtrace of the current call stack.
-func backtraceAt(file string, line int) bool {
- return logBacktraceAt.match(file, line)
-}
-
-func init() {
- vflags.moduleLevelCache.Store(&sync.Map{})
-
- flag.Var(&vflags.v, "v", "log level for V logs")
- flag.Var(vModuleFlag{&vflags}, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
-
- flag.Var(&logBacktraceAt, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
-
- stderrThreshold = severityFlag(logsink.Error)
-
- flag.BoolVar(&toStderr, "logtostderr", false, "log to standard error instead of files")
- flag.BoolVar(&alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
- flag.Var(&stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
-}
diff --git a/vendor/github.com/golang/glog/internal/logsink/logsink.go b/vendor/github.com/golang/glog/internal/logsink/logsink.go
deleted file mode 100644
index 28c38a6abc6..00000000000
--- a/vendor/github.com/golang/glog/internal/logsink/logsink.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2023 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logsink
-
-import (
- "bytes"
- "context"
- "fmt"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/golang/glog/internal/stackdump"
-)
-
-// MaxLogMessageLen is the limit on length of a formatted log message, including
-// the standard line prefix and trailing newline.
-//
-// Chosen to match C++ glog.
-const MaxLogMessageLen = 15000
-
-// A Severity is a severity at which a message can be logged.
-type Severity int8
-
-// These constants identify the log levels in order of increasing severity.
-// A message written to a high-severity log file is also written to each
-// lower-severity log file.
-const (
- Info Severity = iota
- Warning
- Error
-
- // Fatal contains logs written immediately before the process terminates.
- //
- // Sink implementations should not terminate the process themselves: the log
- // package will perform any necessary cleanup and terminate the process as
- // appropriate.
- Fatal
-)
-
-func (s Severity) String() string {
- switch s {
- case Info:
- return "INFO"
- case Warning:
- return "WARNING"
- case Error:
- return "ERROR"
- case Fatal:
- return "FATAL"
- }
- return fmt.Sprintf("%T(%d)", s, s)
-}
-
-// ParseSeverity returns the case-insensitive Severity value for the given string.
-func ParseSeverity(name string) (Severity, error) {
- name = strings.ToUpper(name)
- for s := Info; s <= Fatal; s++ {
- if s.String() == name {
- return s, nil
- }
- }
- return -1, fmt.Errorf("logsink: invalid severity %q", name)
-}
-
-// Meta is metadata about a logging call.
-type Meta struct {
- // The context with which the log call was made (or nil). If set, the context
- // is only valid during the logsink.Structured.Printf call, it should not be
- // retained.
- Context context.Context
-
- // Time is the time at which the log call was made.
- Time time.Time
-
- // File is the source file from which the log entry originates.
- File string
- // Line is the line offset within the source file.
- Line int
- // Depth is the number of stack frames between the logsink and the log call.
- Depth int
-
- Severity Severity
-
- // Verbose indicates whether the call was made via "log.V". Log entries below
- // the current verbosity threshold are not sent to the sink.
- Verbose bool
-
- // Thread ID. This can be populated with a thread ID from another source,
- // such as a system we are importing logs from. In the normal case, this
- // will be set to the process ID (PID), since Go doesn't have threads.
- Thread int64
-
- // Stack trace starting in the logging function. May be nil.
- // A logsink should implement the StackWanter interface to request this.
- //
- // Even if WantStack returns false, this field may be set (e.g. if another
- // sink wants a stack trace).
- Stack *stackdump.Stack
-}
-
-// Structured is a logging destination that accepts structured data as input.
-type Structured interface {
- // Printf formats according to a fmt.Printf format specifier and writes a log
- // entry. The precise result of formatting depends on the sink, but should
- // aim for consistency with fmt.Printf.
- //
- // Printf returns the number of bytes occupied by the log entry, which
- // may not be equal to the total number of bytes written.
- //
- // Printf returns any error encountered *if* it is severe enough that the log
- // package should terminate the process.
- //
- // The sink must not modify the *Meta parameter, nor reference it after
- // Printf has returned: it may be reused in subsequent calls.
- Printf(meta *Meta, format string, a ...any) (n int, err error)
-}
-
-// StackWanter can be implemented by a logsink.Structured to indicate that it
-// wants a stack trace to accompany at least some of the log messages it receives.
-type StackWanter interface {
- // WantStack returns true if the sink requires a stack trace for a log message
- // with this metadata.
- //
- // NOTE: Returning true implies that meta.Stack will be non-nil. Returning
- // false does NOT imply that meta.Stack will be nil.
- WantStack(meta *Meta) bool
-}
-
-// Text is a logging destination that accepts pre-formatted log lines (instead of
-// structured data).
-type Text interface {
- // Enabled returns whether this sink should output messages for the given
- // Meta. If the sink returns false for a given Meta, the Printf function will
- // not call Emit on it for the corresponding log message.
- Enabled(*Meta) bool
-
- // Emit writes a pre-formatted text log entry (including any applicable
- // header) to the log. It returns the number of bytes occupied by the entry
- // (which may differ from the length of the passed-in slice).
- //
- // Emit returns any error encountered *if* it is severe enough that the log
- // package should terminate the process.
- //
- // The sink must not modify the *Meta parameter, nor reference it after
- // Printf has returned: it may be reused in subsequent calls.
- //
- // NOTE: When developing a text sink, keep in mind the surface in which the
- // logs will be displayed, and whether it's important that the sink be
- // resistent to tampering in the style of b/211428300. Standard text sinks
- // (like `stderrSink`) do not protect against this (e.g. by escaping
- // characters) because the cases where they would show user-influenced bytes
- // are vanishingly small.
- Emit(*Meta, []byte) (n int, err error)
-}
-
-// bufs is a pool of *bytes.Buffer used in formatting log entries.
-var bufs sync.Pool // Pool of *bytes.Buffer.
-
-// textPrintf formats a text log entry and emits it to all specified Text sinks.
-//
-// The returned n is the maximum across all Emit calls.
-// The returned err is the first non-nil error encountered.
-// Sinks that are disabled by configuration should return (0, nil).
-func textPrintf(m *Meta, textSinks []Text, format string, args ...any) (n int, err error) {
- // We expect at most file, stderr, and perhaps syslog. If there are more,
- // we'll end up allocating - no big deal.
- const maxExpectedTextSinks = 3
- var noAllocSinks [maxExpectedTextSinks]Text
-
- sinks := noAllocSinks[:0]
- for _, s := range textSinks {
- if s.Enabled(m) {
- sinks = append(sinks, s)
- }
- }
- if len(sinks) == 0 && m.Severity != Fatal {
- return 0, nil // No TextSinks specified; don't bother formatting.
- }
-
- bufi := bufs.Get()
- var buf *bytes.Buffer
- if bufi == nil {
- buf = bytes.NewBuffer(nil)
- bufi = buf
- } else {
- buf = bufi.(*bytes.Buffer)
- buf.Reset()
- }
-
- // Lmmdd hh:mm:ss.uuuuuu PID/GID file:line]
- //
- // The "PID" entry arguably ought to be TID for consistency with other
- // environments, but TID is not meaningful in a Go program due to the
- // multiplexing of goroutines across threads.
- //
- // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
- // It's worth about 3X. Fprintf is hard.
- const severityChar = "IWEF"
- buf.WriteByte(severityChar[m.Severity])
-
- _, month, day := m.Time.Date()
- hour, minute, second := m.Time.Clock()
- twoDigits(buf, int(month))
- twoDigits(buf, day)
- buf.WriteByte(' ')
- twoDigits(buf, hour)
- buf.WriteByte(':')
- twoDigits(buf, minute)
- buf.WriteByte(':')
- twoDigits(buf, second)
- buf.WriteByte('.')
- nDigits(buf, 6, uint64(m.Time.Nanosecond()/1000), '0')
- buf.WriteByte(' ')
-
- nDigits(buf, 7, uint64(m.Thread), ' ')
- buf.WriteByte(' ')
-
- {
- file := m.File
- if i := strings.LastIndex(file, "/"); i >= 0 {
- file = file[i+1:]
- }
- buf.WriteString(file)
- }
-
- buf.WriteByte(':')
- {
- var tmp [19]byte
- buf.Write(strconv.AppendInt(tmp[:0], int64(m.Line), 10))
- }
- buf.WriteString("] ")
-
- msgStart := buf.Len()
- fmt.Fprintf(buf, format, args...)
- if buf.Len() > MaxLogMessageLen-1 {
- buf.Truncate(MaxLogMessageLen - 1)
- }
- msgEnd := buf.Len()
- if b := buf.Bytes(); b[len(b)-1] != '\n' {
- buf.WriteByte('\n')
- }
-
- for _, s := range sinks {
- sn, sErr := s.Emit(m, buf.Bytes())
- if sn > n {
- n = sn
- }
- if sErr != nil && err == nil {
- err = sErr
- }
- }
-
- if m.Severity == Fatal {
- savedM := *m
- fatalMessageStore(savedEntry{
- meta: &savedM,
- msg: buf.Bytes()[msgStart:msgEnd],
- })
- } else {
- bufs.Put(bufi)
- }
- return n, err
-}
-
-const digits = "0123456789"
-
-// twoDigits formats a zero-prefixed two-digit integer to buf.
-func twoDigits(buf *bytes.Buffer, d int) {
- buf.WriteByte(digits[(d/10)%10])
- buf.WriteByte(digits[d%10])
-}
-
-// nDigits formats an n-digit integer to buf, padding with pad on the left. It
-// assumes d != 0.
-func nDigits(buf *bytes.Buffer, n int, d uint64, pad byte) {
- var tmp [20]byte
-
- cutoff := len(tmp) - n
- j := len(tmp) - 1
- for ; d > 0; j-- {
- tmp[j] = digits[d%10]
- d /= 10
- }
- for ; j >= cutoff; j-- {
- tmp[j] = pad
- }
- j++
- buf.Write(tmp[j:])
-}
-
-// Printf writes a log entry to all registered TextSinks in this package, then
-// to all registered StructuredSinks.
-//
-// The returned n is the maximum across all Emit and Printf calls.
-// The returned err is the first non-nil error encountered.
-// Sinks that are disabled by configuration should return (0, nil).
-func Printf(m *Meta, format string, args ...any) (n int, err error) {
- m.Depth++
- n, err = textPrintf(m, TextSinks, format, args...)
-
- for _, sink := range StructuredSinks {
- // TODO: Support TextSinks that implement StackWanter?
- if sw, ok := sink.(StackWanter); ok && sw.WantStack(m) {
- if m.Stack == nil {
- // First, try to find a stacktrace in args, otherwise generate one.
- for _, arg := range args {
- if stack, ok := arg.(stackdump.Stack); ok {
- m.Stack = &stack
- break
- }
- }
- if m.Stack == nil {
- stack := stackdump.Caller( /* skipDepth = */ m.Depth)
- m.Stack = &stack
- }
- }
- }
- sn, sErr := sink.Printf(m, format, args...)
- if sn > n {
- n = sn
- }
- if sErr != nil && err == nil {
- err = sErr
- }
- }
- return n, err
-}
-
-// The sets of sinks to which logs should be written.
-//
-// These must only be modified during package init, and are read-only thereafter.
-var (
- // StructuredSinks is the set of Structured sink instances to which logs
- // should be written.
- StructuredSinks []Structured
-
- // TextSinks is the set of Text sink instances to which logs should be
- // written.
- //
- // These are registered separately from Structured sink implementations to
- // avoid the need to repeat the work of formatting a message for each Text
- // sink that writes it. The package-level Printf function writes to both sets
- // independenty, so a given log destination should only register a Structured
- // *or* a Text sink (not both).
- TextSinks []Text
-)
-
-type savedEntry struct {
- meta *Meta
- msg []byte
-}
-
-// StructuredTextWrapper is a Structured sink which forwards logs to a set of Text sinks.
-//
-// The purpose of this sink is to allow applications to intercept logging calls before they are
-// serialized and sent to Text sinks. For example, if one needs to redact PII from logging
-// arguments before they reach STDERR, one solution would be to do the redacting in a Structured
-// sink that forwards logs to a StructuredTextWrapper instance, and make STDERR a child of that
-// StructuredTextWrapper instance. This is how one could set this up in their application:
-//
-// func init() {
-//
-// wrapper := logsink.StructuredTextWrapper{TextSinks: logsink.TextSinks}
-// // sanitizersink will intercept logs and remove PII
-// sanitizer := sanitizersink{Sink: &wrapper}
-// logsink.StructuredSinks = append(logsink.StructuredSinks, &sanitizer)
-// logsink.TextSinks = nil
-//
-// }
-type StructuredTextWrapper struct {
- // TextSinks is the set of Text sinks that should receive logs from this
- // StructuredTextWrapper instance.
- TextSinks []Text
-}
-
-// Printf forwards logs to all Text sinks registered in the StructuredTextWrapper.
-func (w *StructuredTextWrapper) Printf(meta *Meta, format string, args ...any) (n int, err error) {
- return textPrintf(meta, w.TextSinks, format, args...)
-}
diff --git a/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go b/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
deleted file mode 100644
index 3dc269abc21..00000000000
--- a/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package logsink
-
-import (
- "sync/atomic"
- "unsafe"
-)
-
-func fatalMessageStore(e savedEntry) {
- // Only put a new one in if we haven't assigned before.
- atomic.CompareAndSwapPointer(&fatalMessage, nil, unsafe.Pointer(&e))
-}
-
-var fatalMessage unsafe.Pointer // savedEntry stored with CompareAndSwapPointer
-
-// FatalMessage returns the Meta and message contents of the first message
-// logged with Fatal severity, or false if none has occurred.
-func FatalMessage() (*Meta, []byte, bool) {
- e := (*savedEntry)(atomic.LoadPointer(&fatalMessage))
- if e == nil {
- return nil, nil, false
- }
- return e.meta, e.msg, true
-}
-
-// DoNotUseRacyFatalMessage is FatalMessage, but worse.
-//
-//go:norace
-//go:nosplit
-func DoNotUseRacyFatalMessage() (*Meta, []byte, bool) {
- e := (*savedEntry)(fatalMessage)
- if e == nil {
- return nil, nil, false
- }
- return e.meta, e.msg, true
-}
diff --git a/vendor/github.com/golang/glog/internal/stackdump/stackdump.go b/vendor/github.com/golang/glog/internal/stackdump/stackdump.go
deleted file mode 100644
index 3427c9d6bd0..00000000000
--- a/vendor/github.com/golang/glog/internal/stackdump/stackdump.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2023 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package stackdump provides wrappers for runtime.Stack and runtime.Callers
-// with uniform support for skipping caller frames.
-//
-// ⚠ Unlike the functions in the runtime package, these may allocate a
-// non-trivial quantity of memory: use them with care. ⚠
-package stackdump
-
-import (
- "bytes"
- "runtime"
-)
-
-// runtimeStackSelfFrames is 1 if runtime.Stack includes the call to
-// runtime.Stack itself or 0 if it does not.
-//
-// As of 2016-04-27, the gccgo compiler includes runtime.Stack but the gc
-// compiler does not.
-var runtimeStackSelfFrames = func() int {
- for n := 1 << 10; n < 1<<20; n *= 2 {
- buf := make([]byte, n)
- n := runtime.Stack(buf, false)
- if bytes.Contains(buf[:n], []byte("runtime.Stack")) {
- return 1
- } else if n < len(buf) || bytes.Count(buf, []byte("\n")) >= 3 {
- return 0
- }
- }
- return 0
-}()
-
-// Stack is a stack dump for a single goroutine.
-type Stack struct {
- // Text is a representation of the stack dump in a human-readable format.
- Text []byte
-
- // PC is a representation of the stack dump using raw program counter values.
- PC []uintptr
-}
-
-func (s Stack) String() string { return string(s.Text) }
-
-// Caller returns the Stack dump for the calling goroutine, starting skipDepth
-// frames before the caller of Caller. (Caller(0) provides a dump starting at
-// the caller of this function.)
-func Caller(skipDepth int) Stack {
- return Stack{
- Text: CallerText(skipDepth + 1),
- PC: CallerPC(skipDepth + 1),
- }
-}
-
-// CallerText returns a textual dump of the stack starting skipDepth frames before
-// the caller. (CallerText(0) provides a dump starting at the caller of this
-// function.)
-func CallerText(skipDepth int) []byte {
- for n := 1 << 10; ; n *= 2 {
- buf := make([]byte, n)
- n := runtime.Stack(buf, false)
- if n < len(buf) {
- return pruneFrames(skipDepth+1+runtimeStackSelfFrames, buf[:n])
- }
- }
-}
-
-// CallerPC returns a dump of the program counters of the stack starting
-// skipDepth frames before the caller. (CallerPC(0) provides a dump starting at
-// the caller of this function.)
-func CallerPC(skipDepth int) []uintptr {
- for n := 1 << 8; ; n *= 2 {
- buf := make([]uintptr, n)
- n := runtime.Callers(skipDepth+2, buf)
- if n < len(buf) {
- return buf[:n]
- }
- }
-}
-
-// pruneFrames removes the topmost skipDepth frames of the first goroutine in a
-// textual stack dump. It overwrites the passed-in slice.
-//
-// If there are fewer than skipDepth frames in the first goroutine's stack,
-// pruneFrames prunes it to an empty stack and leaves the remaining contents
-// intact.
-func pruneFrames(skipDepth int, stack []byte) []byte {
- headerLen := 0
- for i, c := range stack {
- if c == '\n' {
- headerLen = i + 1
- break
- }
- }
- if headerLen == 0 {
- return stack // No header line - not a well-formed stack trace.
- }
-
- skipLen := headerLen
- skipNewlines := skipDepth * 2
- for ; skipLen < len(stack) && skipNewlines > 0; skipLen++ {
- c := stack[skipLen]
- if c != '\n' {
- continue
- }
- skipNewlines--
- skipLen++
- if skipNewlines == 0 || skipLen == len(stack) || stack[skipLen] == '\n' {
- break
- }
- }
-
- pruned := stack[skipLen-headerLen:]
- copy(pruned, stack[:headerLen])
- return pruned
-}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go
deleted file mode 100644
index c6f66f10393..00000000000
--- a/vendor/github.com/golang/protobuf/jsonpb/decode.go
+++ /dev/null
@@ -1,531 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package jsonpb
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "math"
- "reflect"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/protobuf/encoding/protojson"
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const wrapJSONUnmarshalV2 = false
-
-// UnmarshalNext unmarshals the next JSON object from d into m.
-func UnmarshalNext(d *json.Decoder, m proto.Message) error {
- return new(Unmarshaler).UnmarshalNext(d, m)
-}
-
-// Unmarshal unmarshals a JSON object from r into m.
-func Unmarshal(r io.Reader, m proto.Message) error {
- return new(Unmarshaler).Unmarshal(r, m)
-}
-
-// UnmarshalString unmarshals a JSON object from s into m.
-func UnmarshalString(s string, m proto.Message) error {
- return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
-}
-
-// Unmarshaler is a configurable object for converting from a JSON
-// representation to a protocol buffer object.
-type Unmarshaler struct {
- // AllowUnknownFields specifies whether to allow messages to contain
- // unknown JSON fields, as opposed to failing to unmarshal.
- AllowUnknownFields bool
-
- // AnyResolver is used to resolve the google.protobuf.Any well-known type.
- // If unset, the global registry is used by default.
- AnyResolver AnyResolver
-}
-
-// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
-// they are unmarshaled from JSON. Messages that implement this should also
-// implement JSONPBMarshaler so that the custom format can be produced.
-//
-// The JSON unmarshaling must follow the JSON to proto specification:
-//
-// https://developers.google.com/protocol-buffers/docs/proto3#json
-//
-// Deprecated: Custom types should implement protobuf reflection instead.
-type JSONPBUnmarshaler interface {
- UnmarshalJSONPB(*Unmarshaler, []byte) error
-}
-
-// Unmarshal unmarshals a JSON object from r into m.
-func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
- return u.UnmarshalNext(json.NewDecoder(r), m)
-}
-
-// UnmarshalNext unmarshals the next JSON object from d into m.
-func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
- if m == nil {
- return errors.New("invalid nil message")
- }
-
- // Parse the next JSON object from the stream.
- raw := json.RawMessage{}
- if err := d.Decode(&raw); err != nil {
- return err
- }
-
- // Check for custom unmarshalers first since they may not properly
- // implement protobuf reflection that the logic below relies on.
- if jsu, ok := m.(JSONPBUnmarshaler); ok {
- return jsu.UnmarshalJSONPB(u, raw)
- }
-
- mr := proto.MessageReflect(m)
-
- // NOTE: For historical reasons, a top-level null is treated as a noop.
- // This is incorrect, but kept for compatibility.
- if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
- return nil
- }
-
- if wrapJSONUnmarshalV2 {
- // NOTE: If input message is non-empty, we need to preserve merge semantics
- // of the old jsonpb implementation. These semantics are not supported by
- // the protobuf JSON specification.
- isEmpty := true
- mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
- isEmpty = false // at least one iteration implies non-empty
- return false
- })
- if !isEmpty {
- // Perform unmarshaling into a newly allocated, empty message.
- mr = mr.New()
-
- // Use a defer to copy all unmarshaled fields into the original message.
- dst := proto.MessageReflect(m)
- defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- dst.Set(fd, v)
- return true
- })
- }
-
- // Unmarshal using the v2 JSON unmarshaler.
- opts := protojson.UnmarshalOptions{
- DiscardUnknown: u.AllowUnknownFields,
- }
- if u.AnyResolver != nil {
- opts.Resolver = anyResolver{u.AnyResolver}
- }
- return opts.Unmarshal(raw, mr.Interface())
- } else {
- if err := u.unmarshalMessage(mr, raw); err != nil {
- return err
- }
- return protoV2.CheckInitialized(mr.Interface())
- }
-}
-
-func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
- md := m.Descriptor()
- fds := md.Fields()
-
- if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
- return jsu.UnmarshalJSONPB(u, in)
- }
-
- if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
- return nil
- }
-
- switch wellKnownType(md.FullName()) {
- case "Any":
- var jsonObject map[string]json.RawMessage
- if err := json.Unmarshal(in, &jsonObject); err != nil {
- return err
- }
-
- rawTypeURL, ok := jsonObject["@type"]
- if !ok {
- return errors.New("Any JSON doesn't have '@type'")
- }
- typeURL, err := unquoteString(string(rawTypeURL))
- if err != nil {
- return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
- }
- m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
-
- var m2 protoreflect.Message
- if u.AnyResolver != nil {
- mi, err := u.AnyResolver.Resolve(typeURL)
- if err != nil {
- return err
- }
- m2 = proto.MessageReflect(mi)
- } else {
- mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
- if err != nil {
- if err == protoregistry.NotFound {
- return fmt.Errorf("could not resolve Any message type: %v", typeURL)
- }
- return err
- }
- m2 = mt.New()
- }
-
- if wellKnownType(m2.Descriptor().FullName()) != "" {
- rawValue, ok := jsonObject["value"]
- if !ok {
- return errors.New("Any JSON doesn't have 'value'")
- }
- if err := u.unmarshalMessage(m2, rawValue); err != nil {
- return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
- }
- } else {
- delete(jsonObject, "@type")
- rawJSON, err := json.Marshal(jsonObject)
- if err != nil {
- return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
- }
- if err = u.unmarshalMessage(m2, rawJSON); err != nil {
- return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
- }
- }
-
- rawWire, err := protoV2.Marshal(m2.Interface())
- if err != nil {
- return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
- }
- m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
- return nil
- case "BoolValue", "BytesValue", "StringValue",
- "Int32Value", "UInt32Value", "FloatValue",
- "Int64Value", "UInt64Value", "DoubleValue":
- fd := fds.ByNumber(1)
- v, err := u.unmarshalValue(m.NewField(fd), in, fd)
- if err != nil {
- return err
- }
- m.Set(fd, v)
- return nil
- case "Duration":
- v, err := unquoteString(string(in))
- if err != nil {
- return err
- }
- d, err := time.ParseDuration(v)
- if err != nil {
- return fmt.Errorf("bad Duration: %v", err)
- }
-
- sec := d.Nanoseconds() / 1e9
- nsec := d.Nanoseconds() % 1e9
- m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
- m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
- return nil
- case "Timestamp":
- v, err := unquoteString(string(in))
- if err != nil {
- return err
- }
- t, err := time.Parse(time.RFC3339Nano, v)
- if err != nil {
- return fmt.Errorf("bad Timestamp: %v", err)
- }
-
- sec := t.Unix()
- nsec := t.Nanosecond()
- m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
- m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
- return nil
- case "Value":
- switch {
- case string(in) == "null":
- m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
- case string(in) == "true":
- m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
- case string(in) == "false":
- m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
- case hasPrefixAndSuffix('"', in, '"'):
- s, err := unquoteString(string(in))
- if err != nil {
- return fmt.Errorf("unrecognized type for Value %q", in)
- }
- m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
- case hasPrefixAndSuffix('[', in, ']'):
- v := m.Mutable(fds.ByNumber(6))
- return u.unmarshalMessage(v.Message(), in)
- case hasPrefixAndSuffix('{', in, '}'):
- v := m.Mutable(fds.ByNumber(5))
- return u.unmarshalMessage(v.Message(), in)
- default:
- f, err := strconv.ParseFloat(string(in), 0)
- if err != nil {
- return fmt.Errorf("unrecognized type for Value %q", in)
- }
- m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
- }
- return nil
- case "ListValue":
- var jsonArray []json.RawMessage
- if err := json.Unmarshal(in, &jsonArray); err != nil {
- return fmt.Errorf("bad ListValue: %v", err)
- }
-
- lv := m.Mutable(fds.ByNumber(1)).List()
- for _, raw := range jsonArray {
- ve := lv.NewElement()
- if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
- return err
- }
- lv.Append(ve)
- }
- return nil
- case "Struct":
- var jsonObject map[string]json.RawMessage
- if err := json.Unmarshal(in, &jsonObject); err != nil {
- return fmt.Errorf("bad StructValue: %v", err)
- }
-
- mv := m.Mutable(fds.ByNumber(1)).Map()
- for key, raw := range jsonObject {
- kv := protoreflect.ValueOf(key).MapKey()
- vv := mv.NewValue()
- if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
- return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
- }
- mv.Set(kv, vv)
- }
- return nil
- }
-
- var jsonObject map[string]json.RawMessage
- if err := json.Unmarshal(in, &jsonObject); err != nil {
- return err
- }
-
- // Handle known fields.
- for i := 0; i < fds.Len(); i++ {
- fd := fds.Get(i)
- if fd.IsWeak() && fd.Message().IsPlaceholder() {
- continue // weak reference is not linked in
- }
-
- // Search for any raw JSON value associated with this field.
- var raw json.RawMessage
- name := string(fd.Name())
- if fd.Kind() == protoreflect.GroupKind {
- name = string(fd.Message().Name())
- }
- if v, ok := jsonObject[name]; ok {
- delete(jsonObject, name)
- raw = v
- }
- name = string(fd.JSONName())
- if v, ok := jsonObject[name]; ok {
- delete(jsonObject, name)
- raw = v
- }
-
- field := m.NewField(fd)
- // Unmarshal the field value.
- if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
- continue
- }
- v, err := u.unmarshalValue(field, raw, fd)
- if err != nil {
- return err
- }
- m.Set(fd, v)
- }
-
- // Handle extension fields.
- for name, raw := range jsonObject {
- if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
- continue
- }
-
- // Resolve the extension field by name.
- xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
- xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
- if xt == nil && isMessageSet(md) {
- xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
- }
- if xt == nil {
- continue
- }
- delete(jsonObject, name)
- fd := xt.TypeDescriptor()
- if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
- return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
- }
-
- field := m.NewField(fd)
- // Unmarshal the field value.
- if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
- continue
- }
- v, err := u.unmarshalValue(field, raw, fd)
- if err != nil {
- return err
- }
- m.Set(fd, v)
- }
-
- if !u.AllowUnknownFields && len(jsonObject) > 0 {
- for name := range jsonObject {
- return fmt.Errorf("unknown field %q in %v", name, md.FullName())
- }
- }
- return nil
-}
-
-func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
- if fd.Cardinality() == protoreflect.Repeated {
- return false
- }
- if md := fd.Message(); md != nil {
- return md.FullName() == "google.protobuf.Value"
- }
- if ed := fd.Enum(); ed != nil {
- return ed.FullName() == "google.protobuf.NullValue"
- }
- return false
-}
-
-func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
- if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
- _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
- return ok
- }
- return false
-}
-
-func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- switch {
- case fd.IsList():
- var jsonArray []json.RawMessage
- if err := json.Unmarshal(in, &jsonArray); err != nil {
- return v, err
- }
- lv := v.List()
- for _, raw := range jsonArray {
- ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
- if err != nil {
- return v, err
- }
- lv.Append(ve)
- }
- return v, nil
- case fd.IsMap():
- var jsonObject map[string]json.RawMessage
- if err := json.Unmarshal(in, &jsonObject); err != nil {
- return v, err
- }
- kfd := fd.MapKey()
- vfd := fd.MapValue()
- mv := v.Map()
- for key, raw := range jsonObject {
- var kv protoreflect.MapKey
- if kfd.Kind() == protoreflect.StringKind {
- kv = protoreflect.ValueOf(key).MapKey()
- } else {
- v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
- if err != nil {
- return v, err
- }
- kv = v.MapKey()
- }
-
- vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
- if err != nil {
- return v, err
- }
- mv.Set(kv, vv)
- }
- return v, nil
- default:
- return u.unmarshalSingularValue(v, in, fd)
- }
-}
-
-var nonFinite = map[string]float64{
- `"NaN"`: math.NaN(),
- `"Infinity"`: math.Inf(+1),
- `"-Infinity"`: math.Inf(-1),
-}
-
-func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- switch fd.Kind() {
- case protoreflect.BoolKind:
- return unmarshalValue(in, new(bool))
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
- return unmarshalValue(trimQuote(in), new(int32))
- case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- return unmarshalValue(trimQuote(in), new(int64))
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
- return unmarshalValue(trimQuote(in), new(uint32))
- case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- return unmarshalValue(trimQuote(in), new(uint64))
- case protoreflect.FloatKind:
- if f, ok := nonFinite[string(in)]; ok {
- return protoreflect.ValueOfFloat32(float32(f)), nil
- }
- return unmarshalValue(trimQuote(in), new(float32))
- case protoreflect.DoubleKind:
- if f, ok := nonFinite[string(in)]; ok {
- return protoreflect.ValueOfFloat64(float64(f)), nil
- }
- return unmarshalValue(trimQuote(in), new(float64))
- case protoreflect.StringKind:
- return unmarshalValue(in, new(string))
- case protoreflect.BytesKind:
- return unmarshalValue(in, new([]byte))
- case protoreflect.EnumKind:
- if hasPrefixAndSuffix('"', in, '"') {
- vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
- if vd == nil {
- return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
- }
- return protoreflect.ValueOfEnum(vd.Number()), nil
- }
- return unmarshalValue(in, new(protoreflect.EnumNumber))
- case protoreflect.MessageKind, protoreflect.GroupKind:
- err := u.unmarshalMessage(v.Message(), in)
- return v, err
- default:
- panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
- }
-}
-
-func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
- err := json.Unmarshal(in, v)
- return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
-}
-
-func unquoteString(in string) (out string, err error) {
- err = json.Unmarshal([]byte(in), &out)
- return out, err
-}
-
-func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
- if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
- return true
- }
- return false
-}
-
-// trimQuote is like unquoteString but simply strips surrounding quotes.
-// This is incorrect, but is behavior done by the legacy implementation.
-func trimQuote(in []byte) []byte {
- if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
- in = in[1 : len(in)-1]
- }
- return in
-}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go
deleted file mode 100644
index e9438a93f33..00000000000
--- a/vendor/github.com/golang/protobuf/jsonpb/encode.go
+++ /dev/null
@@ -1,560 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package jsonpb
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "math"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/protobuf/encoding/protojson"
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const wrapJSONMarshalV2 = false
-
-// Marshaler is a configurable object for marshaling protocol buffer messages
-// to the specified JSON representation.
-type Marshaler struct {
- // OrigName specifies whether to use the original protobuf name for fields.
- OrigName bool
-
- // EnumsAsInts specifies whether to render enum values as integers,
- // as opposed to string values.
- EnumsAsInts bool
-
- // EmitDefaults specifies whether to render fields with zero values.
- EmitDefaults bool
-
- // Indent controls whether the output is compact or not.
- // If empty, the output is compact JSON. Otherwise, every JSON object
- // entry and JSON array value will be on its own line.
- // Each line will be preceded by repeated copies of Indent, where the
- // number of copies is the current indentation depth.
- Indent string
-
- // AnyResolver is used to resolve the google.protobuf.Any well-known type.
- // If unset, the global registry is used by default.
- AnyResolver AnyResolver
-}
-
-// JSONPBMarshaler is implemented by protobuf messages that customize the
-// way they are marshaled to JSON. Messages that implement this should also
-// implement JSONPBUnmarshaler so that the custom format can be parsed.
-//
-// The JSON marshaling must follow the proto to JSON specification:
-//
-// https://developers.google.com/protocol-buffers/docs/proto3#json
-//
-// Deprecated: Custom types should implement protobuf reflection instead.
-type JSONPBMarshaler interface {
- MarshalJSONPB(*Marshaler) ([]byte, error)
-}
-
-// Marshal serializes a protobuf message as JSON into w.
-func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
- b, err := jm.marshal(m)
- if len(b) > 0 {
- if _, err := w.Write(b); err != nil {
- return err
- }
- }
- return err
-}
-
-// MarshalToString serializes a protobuf message as JSON in string form.
-func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
- b, err := jm.marshal(m)
- if err != nil {
- return "", err
- }
- return string(b), nil
-}
-
-func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
- v := reflect.ValueOf(m)
- if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
- return nil, errors.New("Marshal called with nil")
- }
-
- // Check for custom marshalers first since they may not properly
- // implement protobuf reflection that the logic below relies on.
- if jsm, ok := m.(JSONPBMarshaler); ok {
- return jsm.MarshalJSONPB(jm)
- }
-
- if wrapJSONMarshalV2 {
- opts := protojson.MarshalOptions{
- UseProtoNames: jm.OrigName,
- UseEnumNumbers: jm.EnumsAsInts,
- EmitUnpopulated: jm.EmitDefaults,
- Indent: jm.Indent,
- }
- if jm.AnyResolver != nil {
- opts.Resolver = anyResolver{jm.AnyResolver}
- }
- return opts.Marshal(proto.MessageReflect(m).Interface())
- } else {
- // Check for unpopulated required fields first.
- m2 := proto.MessageReflect(m)
- if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
- return nil, err
- }
-
- w := jsonWriter{Marshaler: jm}
- err := w.marshalMessage(m2, "", "")
- return w.buf, err
- }
-}
-
-type jsonWriter struct {
- *Marshaler
- buf []byte
-}
-
-func (w *jsonWriter) write(s string) {
- w.buf = append(w.buf, s...)
-}
-
-func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
- if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
- b, err := jsm.MarshalJSONPB(w.Marshaler)
- if err != nil {
- return err
- }
- if typeURL != "" {
- // we are marshaling this object to an Any type
- var js map[string]*json.RawMessage
- if err = json.Unmarshal(b, &js); err != nil {
- return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
- }
- turl, err := json.Marshal(typeURL)
- if err != nil {
- return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
- }
- js["@type"] = (*json.RawMessage)(&turl)
- if b, err = json.Marshal(js); err != nil {
- return err
- }
- }
- w.write(string(b))
- return nil
- }
-
- md := m.Descriptor()
- fds := md.Fields()
-
- // Handle well-known types.
- const secondInNanos = int64(time.Second / time.Nanosecond)
- switch wellKnownType(md.FullName()) {
- case "Any":
- return w.marshalAny(m, indent)
- case "BoolValue", "BytesValue", "StringValue",
- "Int32Value", "UInt32Value", "FloatValue",
- "Int64Value", "UInt64Value", "DoubleValue":
- fd := fds.ByNumber(1)
- return w.marshalValue(fd, m.Get(fd), indent)
- case "Duration":
- const maxSecondsInDuration = 315576000000
- // "Generated output always contains 0, 3, 6, or 9 fractional digits,
- // depending on required precision."
- s := m.Get(fds.ByNumber(1)).Int()
- ns := m.Get(fds.ByNumber(2)).Int()
- if s < -maxSecondsInDuration || s > maxSecondsInDuration {
- return fmt.Errorf("seconds out of range %v", s)
- }
- if ns <= -secondInNanos || ns >= secondInNanos {
- return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
- }
- if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
- return errors.New("signs of seconds and nanos do not match")
- }
- var sign string
- if s < 0 || ns < 0 {
- sign, s, ns = "-", -1*s, -1*ns
- }
- x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, ".000")
- w.write(fmt.Sprintf(`"%vs"`, x))
- return nil
- case "Timestamp":
- // "RFC 3339, where generated output will always be Z-normalized
- // and uses 0, 3, 6 or 9 fractional digits."
- s := m.Get(fds.ByNumber(1)).Int()
- ns := m.Get(fds.ByNumber(2)).Int()
- if ns < 0 || ns >= secondInNanos {
- return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
- }
- t := time.Unix(s, ns).UTC()
- // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
- x := t.Format("2006-01-02T15:04:05.000000000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, ".000")
- w.write(fmt.Sprintf(`"%vZ"`, x))
- return nil
- case "Value":
- // JSON value; which is a null, number, string, bool, object, or array.
- od := md.Oneofs().Get(0)
- fd := m.WhichOneof(od)
- if fd == nil {
- return errors.New("nil Value")
- }
- return w.marshalValue(fd, m.Get(fd), indent)
- case "Struct", "ListValue":
- // JSON object or array.
- fd := fds.ByNumber(1)
- return w.marshalValue(fd, m.Get(fd), indent)
- }
-
- w.write("{")
- if w.Indent != "" {
- w.write("\n")
- }
-
- firstField := true
- if typeURL != "" {
- if err := w.marshalTypeURL(indent, typeURL); err != nil {
- return err
- }
- firstField = false
- }
-
- for i := 0; i < fds.Len(); {
- fd := fds.Get(i)
- if od := fd.ContainingOneof(); od != nil {
- fd = m.WhichOneof(od)
- i += od.Fields().Len()
- if fd == nil {
- continue
- }
- } else {
- i++
- }
-
- v := m.Get(fd)
-
- if !m.Has(fd) {
- if !w.EmitDefaults || fd.ContainingOneof() != nil {
- continue
- }
- if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
- v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
- }
- }
-
- if !firstField {
- w.writeComma()
- }
- if err := w.marshalField(fd, v, indent); err != nil {
- return err
- }
- firstField = false
- }
-
- // Handle proto2 extensions.
- if md.ExtensionRanges().Len() > 0 {
- // Collect a sorted list of all extension descriptor and values.
- type ext struct {
- desc protoreflect.FieldDescriptor
- val protoreflect.Value
- }
- var exts []ext
- m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- if fd.IsExtension() {
- exts = append(exts, ext{fd, v})
- }
- return true
- })
- sort.Slice(exts, func(i, j int) bool {
- return exts[i].desc.Number() < exts[j].desc.Number()
- })
-
- for _, ext := range exts {
- if !firstField {
- w.writeComma()
- }
- if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
- return err
- }
- firstField = false
- }
- }
-
- if w.Indent != "" {
- w.write("\n")
- w.write(indent)
- }
- w.write("}")
- return nil
-}
-
-func (w *jsonWriter) writeComma() {
- if w.Indent != "" {
- w.write(",\n")
- } else {
- w.write(",")
- }
-}
-
-func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
- // "If the Any contains a value that has a special JSON mapping,
- // it will be converted as follows: {"@type": xxx, "value": yyy}.
- // Otherwise, the value will be converted into a JSON object,
- // and the "@type" field will be inserted to indicate the actual data type."
- md := m.Descriptor()
- typeURL := m.Get(md.Fields().ByNumber(1)).String()
- rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
-
- var m2 protoreflect.Message
- if w.AnyResolver != nil {
- mi, err := w.AnyResolver.Resolve(typeURL)
- if err != nil {
- return err
- }
- m2 = proto.MessageReflect(mi)
- } else {
- mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
- if err != nil {
- return err
- }
- m2 = mt.New()
- }
-
- if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
- return err
- }
-
- if wellKnownType(m2.Descriptor().FullName()) == "" {
- return w.marshalMessage(m2, indent, typeURL)
- }
-
- w.write("{")
- if w.Indent != "" {
- w.write("\n")
- }
- if err := w.marshalTypeURL(indent, typeURL); err != nil {
- return err
- }
- w.writeComma()
- if w.Indent != "" {
- w.write(indent)
- w.write(w.Indent)
- w.write(`"value": `)
- } else {
- w.write(`"value":`)
- }
- if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
- return err
- }
- if w.Indent != "" {
- w.write("\n")
- w.write(indent)
- }
- w.write("}")
- return nil
-}
-
-func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
- if w.Indent != "" {
- w.write(indent)
- w.write(w.Indent)
- }
- w.write(`"@type":`)
- if w.Indent != "" {
- w.write(" ")
- }
- b, err := json.Marshal(typeURL)
- if err != nil {
- return err
- }
- w.write(string(b))
- return nil
-}
-
-// marshalField writes field description and value to the Writer.
-func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
- if w.Indent != "" {
- w.write(indent)
- w.write(w.Indent)
- }
- w.write(`"`)
- switch {
- case fd.IsExtension():
- // For message set, use the fname of the message as the extension name.
- name := string(fd.FullName())
- if isMessageSet(fd.ContainingMessage()) {
- name = strings.TrimSuffix(name, ".message_set_extension")
- }
-
- w.write("[" + name + "]")
- case w.OrigName:
- name := string(fd.Name())
- if fd.Kind() == protoreflect.GroupKind {
- name = string(fd.Message().Name())
- }
- w.write(name)
- default:
- w.write(string(fd.JSONName()))
- }
- w.write(`":`)
- if w.Indent != "" {
- w.write(" ")
- }
- return w.marshalValue(fd, v, indent)
-}
-
-func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
- switch {
- case fd.IsList():
- w.write("[")
- comma := ""
- lv := v.List()
- for i := 0; i < lv.Len(); i++ {
- w.write(comma)
- if w.Indent != "" {
- w.write("\n")
- w.write(indent)
- w.write(w.Indent)
- w.write(w.Indent)
- }
- if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
- return err
- }
- comma = ","
- }
- if w.Indent != "" {
- w.write("\n")
- w.write(indent)
- w.write(w.Indent)
- }
- w.write("]")
- return nil
- case fd.IsMap():
- kfd := fd.MapKey()
- vfd := fd.MapValue()
- mv := v.Map()
-
- // Collect a sorted list of all map keys and values.
- type entry struct{ key, val protoreflect.Value }
- var entries []entry
- mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
- entries = append(entries, entry{k.Value(), v})
- return true
- })
- sort.Slice(entries, func(i, j int) bool {
- switch kfd.Kind() {
- case protoreflect.BoolKind:
- return !entries[i].key.Bool() && entries[j].key.Bool()
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- return entries[i].key.Int() < entries[j].key.Int()
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- return entries[i].key.Uint() < entries[j].key.Uint()
- case protoreflect.StringKind:
- return entries[i].key.String() < entries[j].key.String()
- default:
- panic("invalid kind")
- }
- })
-
- w.write(`{`)
- comma := ""
- for _, entry := range entries {
- w.write(comma)
- if w.Indent != "" {
- w.write("\n")
- w.write(indent)
- w.write(w.Indent)
- w.write(w.Indent)
- }
-
- s := fmt.Sprint(entry.key.Interface())
- b, err := json.Marshal(s)
- if err != nil {
- return err
- }
- w.write(string(b))
-
- w.write(`:`)
- if w.Indent != "" {
- w.write(` `)
- }
-
- if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
- return err
- }
- comma = ","
- }
- if w.Indent != "" {
- w.write("\n")
- w.write(indent)
- w.write(w.Indent)
- }
- w.write(`}`)
- return nil
- default:
- return w.marshalSingularValue(fd, v, indent)
- }
-}
-
-func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
- switch {
- case !v.IsValid():
- w.write("null")
- return nil
- case fd.Message() != nil:
- return w.marshalMessage(v.Message(), indent+w.Indent, "")
- case fd.Enum() != nil:
- if fd.Enum().FullName() == "google.protobuf.NullValue" {
- w.write("null")
- return nil
- }
-
- vd := fd.Enum().Values().ByNumber(v.Enum())
- if vd == nil || w.EnumsAsInts {
- w.write(strconv.Itoa(int(v.Enum())))
- } else {
- w.write(`"` + string(vd.Name()) + `"`)
- }
- return nil
- default:
- switch v.Interface().(type) {
- case float32, float64:
- switch {
- case math.IsInf(v.Float(), +1):
- w.write(`"Infinity"`)
- return nil
- case math.IsInf(v.Float(), -1):
- w.write(`"-Infinity"`)
- return nil
- case math.IsNaN(v.Float()):
- w.write(`"NaN"`)
- return nil
- }
- case int64, uint64:
- w.write(fmt.Sprintf(`"%d"`, v.Interface()))
- return nil
- }
-
- b, err := json.Marshal(v.Interface())
- if err != nil {
- return err
- }
- w.write(string(b))
- return nil
- }
-}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go
deleted file mode 100644
index 480e2448de6..00000000000
--- a/vendor/github.com/golang/protobuf/jsonpb/json.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package jsonpb provides functionality to marshal and unmarshal between a
-// protocol buffer message and JSON. It follows the specification at
-// https://developers.google.com/protocol-buffers/docs/proto3#json.
-//
-// Do not rely on the default behavior of the standard encoding/json package
-// when called on generated message types as it does not operate correctly.
-//
-// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
-// package instead.
-package jsonpb
-
-import (
- "github.com/golang/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// AnyResolver takes a type URL, present in an Any message,
-// and resolves it into an instance of the associated message.
-type AnyResolver interface {
- Resolve(typeURL string) (proto.Message, error)
-}
-
-type anyResolver struct{ AnyResolver }
-
-func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
- return r.FindMessageByURL(string(message))
-}
-
-func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
- m, err := r.Resolve(url)
- if err != nil {
- return nil, err
- }
- return protoimpl.X.MessageTypeOf(m), nil
-}
-
-func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
- return protoregistry.GlobalTypes.FindExtensionByName(field)
-}
-
-func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
- return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
-}
-
-func wellKnownType(s protoreflect.FullName) string {
- if s.Parent() == "google.protobuf" {
- switch s.Name() {
- case "Empty", "Any",
- "BoolValue", "BytesValue", "StringValue",
- "Int32Value", "UInt32Value", "FloatValue",
- "Int64Value", "UInt64Value", "DoubleValue",
- "Duration", "Timestamp",
- "NullValue", "Struct", "Value", "ListValue":
- return string(s.Name())
- }
- }
- return ""
-}
-
-func isMessageSet(md protoreflect.MessageDescriptor) bool {
- ms, ok := md.(interface{ IsMessageSet() bool })
- return ok && ms.IsMessageSet()
-}
diff --git a/vendor/github.com/google/go-github/v27/AUTHORS b/vendor/github.com/google/go-github/v27/AUTHORS
deleted file mode 100644
index d38236ed45f..00000000000
--- a/vendor/github.com/google/go-github/v27/AUTHORS
+++ /dev/null
@@ -1,251 +0,0 @@
-# This is the official list of go-github authors for copyright purposes.
-#
-# This does not necessarily list everyone who has contributed code, since in
-# some cases, their employer may be the copyright holder. To see the full list
-# of contributors, see the revision history in source control or
-# https://github.com/google/go-github/graphs/contributors.
-#
-# Authors who wish to be recognized in this file should add themselves (or
-# their employer, as appropriate).
-
-178inaba
-Abhinav Gupta
-adrienzieba
-Ahmed Hagy
-Aidan Steele
-Ainsley Chong
-Akeda Bagus
-Akhil Mohan
-Alec Thomas
-Aleks Clark
-Alex Bramley
-Alexander Harkness
-Allen Sun
-Amey Sakhadeo
-Andreas Garnæs
-Andrew Ryabchun
-Andy Grunwald
-Andy Hume
-Andy Lindeman
-anjanashenoy
-Anshuman Bhartiya
-Antoine
-Antoine Pelisse
-Anubha Kushwaha
-appilon
-Aravind
-Arda Kuyumcu
-Arıl Bozoluk
-Austin Dizzy
-Ben Batha
-Benjamen Keroack
-Beshr Kayali
-Beyang Liu
-Billy Lynch
-Björn Häuser
-Brad Harris
-Brad Moylan
-Bradley Falzon
-Brandon Cook
-Brian Egizi
-Bryan Boreham
-Cami Diez
-Carlos Alexandro Becker
-Carlos Tadeu Panato Junior
-chandresh-pancholi
-Charles Fenwick Elliott
-Charlie Yan
-Chris King
-Chris Roche
-Chris Schaefer
-chrisforrette
-Christian Muehlhaeuser
-Christoph Sassenberg
-Colin Misare
-Craig Peterson
-Cristian Maglie
-Daehyeok Mun
-Daniel Leavitt
-Daniel Nilsson
-Dave Du Cros
-Dave Henderson
-David Deng
-David Jannotta
-David Ji
-Davide Zipeto
-Dennis Webb
-Dhi Aurrahman
-Diego Lapiduz
-Dmitri Shuralyov
-dmnlk
-Don Petersen
-Doug Turner
-Drew Fradette
-Eli Uriegas
-Elliott Beach
-Emerson Wood
-eperm
-Erick Fejta
-erwinvaneyk
-Evan Elias
-Fabrice
-Felix Geisendörfer
-Filippo Valsorda
-Florian Forster
-Francesc Gil
-Francis
-Fredrik Jönsson
-Garrett Squire
-George Kontridze
-Georgy Buranov
-Gnahz
-Google Inc.
-Grachev Mikhail
-griffin_stewie
-Guillaume Jacquet
-Guz Alexander
-Guðmundur Bjarni Ólafsson
-Hanno Hecker
-Hari haran
-haya14busa
-Huy Tr
-huydx
-i2bskn
-Isao Jonas
-isqua
-Jameel Haffejee
-Jan Kosecki
-Javier Campanini
-Jens Rantil
-Jeremy Morris
-Jesse Newland
-Jihoon Chung
-Jimmi Dyson
-Joan Saum
-Joe Tsai
-John Barton
-John Engelman
-Jordan Brockopp
-Jordan Sussman
-Joshua Bezaleel Abednego
-JP Phillips
-jpbelanger-mtl
-Juan Basso
-Julien Garcia Gonzalez
-Julien Rostand
-Justin Abrahms
-Jusung Lee
-jzhoucliqr
-Katrina Owen
-Kautilya Tripathi < tripathi.kautilya@gmail.com>
-Kautilya Tripathi
-Keita Urashima
-Kevin Burke
-Konrad Malawski
-Kookheon Kwon
-Krzysztof Kowalczyk
-Kshitij Saraogi
-kyokomi
-Laurent Verdoïa
-Liam Galvin
-Lovro Mažgon
-Lucas Alcantara
-Luke Evers
-Luke Kysow
-Luke Roberts
-Luke Young
-Maksim Zhylinski
-Mark Tareshawty
-Martin-Louis Bright
-Marwan Sulaiman
-Masayuki Izumi
-Mat Geist
-Matt
-Matt Brender
-Matt Gaunt
-Matt Landis
-Maxime Bury
-Michael Spiegel
-Michael Tiller
-Michał Glapa
-Nadav Kaner
-Nathan VanBenschoten
-Navaneeth Suresh
-Neil O'Toole
-Nick Miyake
-Nick Spragg
-Nikhita Raghunath
-Noah Zoschke
-ns-cweber
-Oleg Kovalov
-Ondřej Kupka
-Palash Nigam
-Panagiotis Moustafellos
-Parham Alvani
-Parker Moore
-parkhyukjun89
-Pavel Shtanko
-Pete Wagner
-Petr Shevtsov
-Pierre Carrier
-Piotr Zurek
-Quentin Leffray
-Quinn Slack
-Rackspace US, Inc.
-Radek Simko
-Radliński Ignacy
-Rajendra arora
-Ranbir Singh
-RaviTeja Pothana
-rc1140
-Red Hat, Inc.
-Ricco Førgaard
-Rob Figueiredo
-Rohit Upadhyay
-Ronak Jain
-Ruben Vereecken
-Ryan Leung
-Ryan Lower
-Sahil Dua
-saisi
-Sam Minnée
-Sandeep Sukhani
-Sander van Harmelen
-Sanket Payghan
-Sarasa Kisaragi
-Sean Wang
-Sebastian Mandrean
-Sebastian Mæland Pedersen
-Sergey Romanov
-Sevki
-Shagun Khemka
-shakeelrao
-Shawn Catanzarite
-Shawn Smith
-Shrikrishna Singh
-sona-tar
-SoundCloud, Ltd.
-Sridhar Mocherla
-SriVignessh Pss
-Stian Eikeland
-Tasya Aditya Rukmana
-Thomas Bruyelle
-Timothée Peignier
-Trey Tacon
-ttacon
-Vaibhav Singh
-Varadarajan Aravamudhan
-Victor Castell
-Victor Vrantchan
-Vlad Ungureanu
-Wasim Thabraze