From 5b32a0b6ad4354db75d196a98384cab0d3dd4032 Mon Sep 17 00:00:00 2001 From: melserngawy Date: Thu, 9 Nov 2023 15:10:10 -0500 Subject: [PATCH] Update OCM APIs and apply Rollout strategy API changes Signed-off-by: melserngawy --- ...cluster-manager.clusterserviceversion.yaml | 2 +- .../klusterlet.clusterserviceversion.yaml | 2 +- deps.diff | 4 + go.mod | 7 +- go.sum | 14 +- ...gement.io_clustermanagementaddons.crd.yaml | 201 +++++++++++--- ...gement.io_manifestworkreplicasets.crd.yaml | 193 ++++++++++--- .../addon_configuration_reconciler_test.go | 90 +++++-- .../manifestworkreplicaset_deploy_test.go | 16 +- pkg/work/hub/test/helper.go | 4 +- test/e2e/manifestworkreplicaset_test.go | 8 +- test/integration/addon/addon_configs_test.go | 5 + .../addon/addon_manager_install_test.go | 5 + .../addon/addon_manager_upgrade_test.go | 19 +- .../work/manifestworkreplicaset_test.go | 4 +- vendor/go.uber.org/atomic/.codecov.yml | 19 -- vendor/go.uber.org/atomic/.gitignore | 15 -- vendor/go.uber.org/atomic/CHANGELOG.md | 117 -------- vendor/go.uber.org/atomic/LICENSE.txt | 19 -- vendor/go.uber.org/atomic/Makefile | 79 ------ vendor/go.uber.org/atomic/README.md | 63 ----- vendor/go.uber.org/atomic/bool.go | 88 ------ vendor/go.uber.org/atomic/doc.go | 23 -- vendor/go.uber.org/atomic/duration.go | 89 ------ vendor/go.uber.org/atomic/duration_ext.go | 40 --- vendor/go.uber.org/atomic/error.go | 62 ----- vendor/go.uber.org/atomic/error_ext.go | 39 --- vendor/go.uber.org/atomic/float32.go | 77 ------ vendor/go.uber.org/atomic/float32_ext.go | 76 ------ vendor/go.uber.org/atomic/float64.go | 77 ------ vendor/go.uber.org/atomic/float64_ext.go | 76 ------ vendor/go.uber.org/atomic/gen.go | 27 -- vendor/go.uber.org/atomic/int32.go | 109 -------- vendor/go.uber.org/atomic/int64.go | 109 -------- vendor/go.uber.org/atomic/nocmp.go | 35 --- vendor/go.uber.org/atomic/pointer_go118.go | 60 ----- vendor/go.uber.org/atomic/pointer_go119.go | 61 ----- vendor/go.uber.org/atomic/string.go | 65 ----- vendor/go.uber.org/atomic/string_ext.go | 43 --- vendor/go.uber.org/atomic/time_ext.go | 36 --- vendor/go.uber.org/atomic/uint32.go | 109 -------- vendor/go.uber.org/atomic/uint64.go | 109 -------- vendor/go.uber.org/atomic/uintptr.go | 109 -------- vendor/go.uber.org/atomic/unsafe_pointer.go | 65 ----- vendor/go.uber.org/atomic/value.go | 31 --- vendor/go.uber.org/zap/.golangci.yml | 77 ++++++ vendor/go.uber.org/zap/CHANGELOG.md | 242 ++++++++++------- vendor/go.uber.org/zap/Makefile | 87 +++--- vendor/go.uber.org/zap/README.md | 62 +++-- vendor/go.uber.org/zap/array.go | 127 +++++++++ vendor/go.uber.org/zap/array_go118.go | 156 ----------- vendor/go.uber.org/zap/buffer/buffer.go | 5 + vendor/go.uber.org/zap/buffer/pool.go | 20 +- vendor/go.uber.org/zap/config.go | 84 +++++- vendor/go.uber.org/zap/error.go | 14 +- vendor/go.uber.org/zap/field.go | 194 +++++++++----- vendor/go.uber.org/zap/http_handler.go | 19 +- .../go.uber.org/zap/internal/level_enabler.go | 2 + .../bool_ext.go => zap/internal/pool/pool.go} | 49 ++-- .../stacktrace/stack.go} | 81 +++--- vendor/go.uber.org/zap/level.go | 9 +- vendor/go.uber.org/zap/logger.go | 48 +++- vendor/go.uber.org/zap/sink.go | 5 +- vendor/go.uber.org/zap/sugar.go | 69 +++-- vendor/go.uber.org/zap/writer.go | 12 +- .../zap/zapcore/console_encoder.go | 14 +- vendor/go.uber.org/zap/zapcore/core.go | 6 +- vendor/go.uber.org/zap/zapcore/entry.go | 22 +- vendor/go.uber.org/zap/zapcore/error.go | 14 +- .../go.uber.org/zap/zapcore/json_encoder.go | 155 ++++++----- .../time.go => 
zap/zapcore/lazy_with.go} | 51 ++-- vendor/go.uber.org/zap/zapcore/sampler.go | 9 +- vendor/go.uber.org/zap/zapgrpc/zapgrpc.go | 8 +- .../x/sync/singleflight/singleflight.go | 9 + vendor/modules.txt | 11 +- ...gement.io_clustermanagementaddons.crd.yaml | 201 +++++++++++--- .../api/cluster/v1alpha1/helpers.go | 253 ++++++++++++++---- .../cluster/v1alpha1/types_rolloutstrategy.go | 101 +++++-- .../cluster/v1alpha1/zz_generated.deepcopy.go | 38 +-- .../zz_generated.swagger_doc_generated.go | 28 +- ...gement.io_manifestworkreplicasets.crd.yaml | 193 ++++++++++--- .../v1alpha1/types_manifestworkreplicaset.go | 2 +- 82 files changed, 2094 insertions(+), 2884 deletions(-) create mode 100644 deps.diff delete mode 100644 vendor/go.uber.org/atomic/.codecov.yml delete mode 100644 vendor/go.uber.org/atomic/.gitignore delete mode 100644 vendor/go.uber.org/atomic/CHANGELOG.md delete mode 100644 vendor/go.uber.org/atomic/LICENSE.txt delete mode 100644 vendor/go.uber.org/atomic/Makefile delete mode 100644 vendor/go.uber.org/atomic/README.md delete mode 100644 vendor/go.uber.org/atomic/bool.go delete mode 100644 vendor/go.uber.org/atomic/doc.go delete mode 100644 vendor/go.uber.org/atomic/duration.go delete mode 100644 vendor/go.uber.org/atomic/duration_ext.go delete mode 100644 vendor/go.uber.org/atomic/error.go delete mode 100644 vendor/go.uber.org/atomic/error_ext.go delete mode 100644 vendor/go.uber.org/atomic/float32.go delete mode 100644 vendor/go.uber.org/atomic/float32_ext.go delete mode 100644 vendor/go.uber.org/atomic/float64.go delete mode 100644 vendor/go.uber.org/atomic/float64_ext.go delete mode 100644 vendor/go.uber.org/atomic/gen.go delete mode 100644 vendor/go.uber.org/atomic/int32.go delete mode 100644 vendor/go.uber.org/atomic/int64.go delete mode 100644 vendor/go.uber.org/atomic/nocmp.go delete mode 100644 vendor/go.uber.org/atomic/pointer_go118.go delete mode 100644 vendor/go.uber.org/atomic/pointer_go119.go delete mode 100644 vendor/go.uber.org/atomic/string.go delete mode 100644 vendor/go.uber.org/atomic/string_ext.go delete mode 100644 vendor/go.uber.org/atomic/time_ext.go delete mode 100644 vendor/go.uber.org/atomic/uint32.go delete mode 100644 vendor/go.uber.org/atomic/uint64.go delete mode 100644 vendor/go.uber.org/atomic/uintptr.go delete mode 100644 vendor/go.uber.org/atomic/unsafe_pointer.go delete mode 100644 vendor/go.uber.org/atomic/value.go create mode 100644 vendor/go.uber.org/zap/.golangci.yml delete mode 100644 vendor/go.uber.org/zap/array_go118.go rename vendor/go.uber.org/{atomic/bool_ext.go => zap/internal/pool/pool.go} (56%) rename vendor/go.uber.org/zap/{stacktrace.go => internal/stacktrace/stack.go} (73%) rename vendor/go.uber.org/{atomic/time.go => zap/zapcore/lazy_with.go} (60%) diff --git a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml index 1decb5ba8..f97566477 100644 --- a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml +++ b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml @@ -59,7 +59,7 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/open-cluster-management/registration-operator:latest - createdAt: "2023-10-10T01:17:41Z" + createdAt: "2023-11-09T20:05:14Z" description: Manages the installation and upgrade of the 
ClusterManager. operators.operatorframework.io/builder: operator-sdk-v1.32.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 diff --git a/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml b/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml index c3c5b344f..80775ecfd 100644 --- a/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml +++ b/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml @@ -31,7 +31,7 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/open-cluster-management/registration-operator:latest - createdAt: "2023-10-10T01:17:42Z" + createdAt: "2023-11-09T20:05:14Z" description: Manages the installation and upgrade of the Klusterlet. operators.operatorframework.io/builder: operator-sdk-v1.32.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 diff --git a/deps.diff b/deps.diff new file mode 100644 index 000000000..e3d52171d --- /dev/null +++ b/deps.diff @@ -0,0 +1,4 @@ +diff --no-dereference -N -r current/vendor/modules.txt updated/vendor/modules.txt +506,507d505 +< # go.uber.org/atomic v1.10.0 +< ## explicit; go 1.18 diff --git a/go.mod b/go.mod index 288077b26..be7f5eec8 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( k8s.io/kube-aggregator v0.28.1 k8s.io/utils v0.0.0-20230726121419-3b25d923346b open-cluster-management.io/addon-framework v0.8.1-0.20231009020812-e52774032b4c - open-cluster-management.io/api v0.12.1-0.20231027024433-bab1208e6889 + open-cluster-management.io/api v0.12.1-0.20231109164634-c10ed7e097aa sigs.k8s.io/controller-runtime v0.15.0 sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 ) @@ -113,12 +113,11 @@ require ( go.opentelemetry.io/otel/sdk v1.10.0 // indirect go.opentelemetry.io/otel/trace v1.10.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.24.0 // indirect + go.uber.org/zap v1.26.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.2.0 // indirect + golang.org/x/sync v0.5.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/term v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect diff --git a/go.sum b/go.sum index 267a05cea..3acbf65a5 100644 --- a/go.sum +++ b/go.sum @@ -52,7 +52,6 @@ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -380,15 +379,14 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= 
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -485,8 +483,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -742,8 +740,8 @@ k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSn k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/addon-framework v0.8.1-0.20231009020812-e52774032b4c h1:9Rvj3UTjVwJWOItlIYx6shFF72f8L3t91T9IwZ8sx6Q= open-cluster-management.io/addon-framework v0.8.1-0.20231009020812-e52774032b4c/go.mod h1:r4sQGR9YgLC4hXC695sfinun2WhuigWrEPk2IeIl800= -open-cluster-management.io/api v0.12.1-0.20231027024433-bab1208e6889 h1:U57ynNMUY6umxZq9F+rLiVqjwky2eXMSHEk5mAtwau0= -open-cluster-management.io/api v0.12.1-0.20231027024433-bab1208e6889/go.mod h1:RaKSNLO1I3xYfvIwIcCxFYgIUp3NOseG0xoGfReBEPw= +open-cluster-management.io/api v0.12.1-0.20231109164634-c10ed7e097aa h1:0qGjPy3jCPmUBtoZhGlNBmy91xfFmXXanstDmFY+b+o= +open-cluster-management.io/api v0.12.1-0.20231109164634-c10ed7e097aa/go.mod h1:/I/nFccB0tmF+dZg7pHuzY3SaXOX86MI4vcFtidJ0OM= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler 
v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/manifests/cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml b/manifests/cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml index a42b5f57c..e08074e70 100644 --- a/manifests/cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml +++ b/manifests/cluster-manager/hub/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml @@ -145,24 +145,69 @@ spec: defined in ClusterManagementAddOn. properties: all: - description: All define required fields for RolloutStrategy + description: All defines required fields for RolloutStrategy type All properties: + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number + of clusters in the current rollout that can fail + before proceeding to the next rollout. MaxFailures + is only considered for rollout types Progressive + and ProgressivePerGroup. For Progressive, this + is considered over the total number of clusters. + For ProgressivePerGroup, this is considered according + to the size of the current group. For both Progressive + and ProgressivePerGroup, the MaxFailures does + not apply for MandatoryDecisionGroups, which tolerate + no failures. Default is that no failures are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. In + other words, the minimum amount of time the workload + applier controller will wait from the start of + each rollout before proceeding (assuming a successful + state has been reached and MaxFailures wasn't + breached). MinSuccessTime is only considered for + rollout types Progressive and ProgressivePerGroup. + The default value is 0 meaning the workload applier + proceeds immediately after a successful state + is reached. MinSuccessTime must be defined in + [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload + to reach a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload + applier will wait for a successful state indefinitely. + ProgressDeadline must be defined in [0-9h]|[0-9m]|[0-9s] + format examples; 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is - None meaning the workload applier will not proceed - apply workload to other clusters if did not reach - the successful state. Timeout must be defined - in [0-9h]|[0-9m]|[0-9s] format examples; 2h , - 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload + reaches a successful state in the cluster. Timeout + default value is None meaning the workload applier + will not proceed apply workload to other clusters + if did not reach the successful state. Timeout + must be defined in [0-9h]|[0-9m]|[0-9s] format + examples; 2h , 90m , 360s \n Deprecated: Use ProgressDeadline + instead." 
pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object progressive: - description: Progressive define required fields for + description: Progressive defines required fields for RolloutStrategy type Progressive properties: mandatoryDecisionGroups: @@ -200,21 +245,66 @@ spec: placement->DecisionStrategy. pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ x-kubernetes-int-or-string: true + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number + of clusters in the current rollout that can fail + before proceeding to the next rollout. MaxFailures + is only considered for rollout types Progressive + and ProgressivePerGroup. For Progressive, this + is considered over the total number of clusters. + For ProgressivePerGroup, this is considered according + to the size of the current group. For both Progressive + and ProgressivePerGroup, the MaxFailures does + not apply for MandatoryDecisionGroups, which tolerate + no failures. Default is that no failures are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. In + other words, the minimum amount of time the workload + applier controller will wait from the start of + each rollout before proceeding (assuming a successful + state has been reached and MaxFailures wasn't + breached). MinSuccessTime is only considered for + rollout types Progressive and ProgressivePerGroup. + The default value is 0 meaning the workload applier + proceeds immediately after a successful state + is reached. MinSuccessTime must be defined in + [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload + to reach a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload + applier will wait for a successful state indefinitely. + ProgressDeadline must be defined in [0-9h]|[0-9m]|[0-9s] + format examples; 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is - None meaning the workload applier will not proceed - apply workload to other clusters if did not reach - the successful state. Timeout must be defined - in [0-9h]|[0-9m]|[0-9s] format examples; 2h , - 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload + reaches a successful state in the cluster. Timeout + default value is None meaning the workload applier + will not proceed apply workload to other clusters + if did not reach the successful state. Timeout + must be defined in [0-9h]|[0-9m]|[0-9s] format + examples; 2h , 90m , 360s \n Deprecated: Use ProgressDeadline + instead." 
pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object progressivePerGroup: - description: ProgressivePerGroup define required fields + description: ProgressivePerGroup defines required fields for RolloutStrategy type ProgressivePerGroup properties: mandatoryDecisionGroups: @@ -241,33 +331,66 @@ spec: type: string type: object type: array + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number + of clusters in the current rollout that can fail + before proceeding to the next rollout. MaxFailures + is only considered for rollout types Progressive + and ProgressivePerGroup. For Progressive, this + is considered over the total number of clusters. + For ProgressivePerGroup, this is considered according + to the size of the current group. For both Progressive + and ProgressivePerGroup, the MaxFailures does + not apply for MandatoryDecisionGroups, which tolerate + no failures. Default is that no failures are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. In + other words, the minimum amount of time the workload + applier controller will wait from the start of + each rollout before proceeding (assuming a successful + state has been reached and MaxFailures wasn't + breached). MinSuccessTime is only considered for + rollout types Progressive and ProgressivePerGroup. + The default value is 0 meaning the workload applier + proceeds immediately after a successful state + is reached. MinSuccessTime must be defined in + [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload + to reach a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload + applier will wait for a successful state indefinitely. + ProgressDeadline must be defined in [0-9h]|[0-9m]|[0-9s] + format examples; 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is - None meaning the workload applier will not proceed - apply workload to other clusters if did not reach - the successful state. Timeout must be defined - in [0-9h]|[0-9m]|[0-9s] format examples; 2h , - 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload + reaches a successful state in the cluster. Timeout + default value is None meaning the workload applier + will not proceed apply workload to other clusters + if did not reach the successful state. Timeout + must be defined in [0-9h]|[0-9m]|[0-9s] format + examples; 2h , 90m , 360s \n Deprecated: Use ProgressDeadline + instead." pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object type: default: All - description: Rollout strategy Types are All, Progressive - and ProgressivePerGroup 1) All means apply the workload - to all clusters in the decision groups at once. 2) - Progressive means apply the workload to the selected - clusters progressively per cluster. The workload will - not be applied to the next cluster unless one of the - current applied clusters reach the successful state - or timeout. 
3) ProgressivePerGroup means apply the - workload to decisionGroup clusters progressively per - group. The workload will not be applied to the next - decisionGroup unless all clusters in the current group - reach the successful state or timeout. enum: - All - Progressive diff --git a/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml b/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml index ecd82e555..c5a083042 100644 --- a/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml +++ b/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml @@ -328,29 +328,73 @@ spec: rolloutStrategy: default: all: - timeout: None + progressDeadline: None type: All description: Rollout strategy to apply workload to the selected clusters by Placement and DecisionStrategy. properties: all: - description: All define required fields for RolloutStrategy + description: All defines required fields for RolloutStrategy type All properties: + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number of + clusters in the current rollout that can fail before + proceeding to the next rollout. MaxFailures is only + considered for rollout types Progressive and ProgressivePerGroup. + For Progressive, this is considered over the total + number of clusters. For ProgressivePerGroup, this + is considered according to the size of the current + group. For both Progressive and ProgressivePerGroup, + the MaxFailures does not apply for MandatoryDecisionGroups, + which tolerate no failures. Default is that no failures + are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. In other + words, the minimum amount of time the workload applier + controller will wait from the start of each rollout + before proceeding (assuming a successful state has + been reached and MaxFailures wasn't breached). MinSuccessTime + is only considered for rollout types Progressive and + ProgressivePerGroup. The default value is 0 meaning + the workload applier proceeds immediately after a + successful state is reached. MinSuccessTime must be + defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h + , 90m , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload to reach + a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload applier + will wait for a successful state indefinitely. ProgressDeadline + must be defined in [0-9h]|[0-9m]|[0-9s] format examples; + 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is None - meaning the workload applier will not proceed apply - workload to other clusters if did not reach the successful - state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] - format examples; 2h , 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload reaches + a successful state in the cluster. 
Timeout default + value is None meaning the workload applier will not + proceed apply workload to other clusters if did not + reach the successful state. Timeout must be defined + in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s \n Deprecated: Use ProgressDeadline instead." pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object progressive: - description: Progressive define required fields for RolloutStrategy + description: Progressive defines required fields for RolloutStrategy type Progressive properties: mandatoryDecisionGroups: @@ -387,20 +431,64 @@ spec: defined in the placement->DecisionStrategy. pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ x-kubernetes-int-or-string: true + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number of + clusters in the current rollout that can fail before + proceeding to the next rollout. MaxFailures is only + considered for rollout types Progressive and ProgressivePerGroup. + For Progressive, this is considered over the total + number of clusters. For ProgressivePerGroup, this + is considered according to the size of the current + group. For both Progressive and ProgressivePerGroup, + the MaxFailures does not apply for MandatoryDecisionGroups, + which tolerate no failures. Default is that no failures + are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. In other + words, the minimum amount of time the workload applier + controller will wait from the start of each rollout + before proceeding (assuming a successful state has + been reached and MaxFailures wasn't breached). MinSuccessTime + is only considered for rollout types Progressive and + ProgressivePerGroup. The default value is 0 meaning + the workload applier proceeds immediately after a + successful state is reached. MinSuccessTime must be + defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h + , 90m , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload to reach + a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload applier + will wait for a successful state indefinitely. ProgressDeadline + must be defined in [0-9h]|[0-9m]|[0-9s] format examples; + 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is None - meaning the workload applier will not proceed apply - workload to other clusters if did not reach the successful - state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] - format examples; 2h , 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload reaches + a successful state in the cluster. Timeout default + value is None meaning the workload applier will not + proceed apply workload to other clusters if did not + reach the successful state. Timeout must be defined + in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s \n Deprecated: Use ProgressDeadline instead." 
pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object progressivePerGroup: - description: ProgressivePerGroup define required fields + description: ProgressivePerGroup defines required fields for RolloutStrategy type ProgressivePerGroup properties: mandatoryDecisionGroups: @@ -427,31 +515,64 @@ spec: type: string type: object type: array + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number of + clusters in the current rollout that can fail before + proceeding to the next rollout. MaxFailures is only + considered for rollout types Progressive and ProgressivePerGroup. + For Progressive, this is considered over the total + number of clusters. For ProgressivePerGroup, this + is considered according to the size of the current + group. For both Progressive and ProgressivePerGroup, + the MaxFailures does not apply for MandatoryDecisionGroups, + which tolerate no failures. Default is that no failures + are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. In other + words, the minimum amount of time the workload applier + controller will wait from the start of each rollout + before proceeding (assuming a successful state has + been reached and MaxFailures wasn't breached). MinSuccessTime + is only considered for rollout types Progressive and + ProgressivePerGroup. The default value is 0 meaning + the workload applier proceeds immediately after a + successful state is reached. MinSuccessTime must be + defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h + , 90m , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload to reach + a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload applier + will wait for a successful state indefinitely. ProgressDeadline + must be defined in [0-9h]|[0-9m]|[0-9s] format examples; + 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is None - meaning the workload applier will not proceed apply - workload to other clusters if did not reach the successful - state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] - format examples; 2h , 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload reaches + a successful state in the cluster. Timeout default + value is None meaning the workload applier will not + proceed apply workload to other clusters if did not + reach the successful state. Timeout must be defined + in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s \n Deprecated: Use ProgressDeadline instead." pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object type: default: All - description: Rollout strategy Types are All, Progressive - and ProgressivePerGroup 1) All means apply the workload - to all clusters in the decision groups at once. 2) Progressive - means apply the workload to the selected clusters progressively - per cluster. The workload will not be applied to the next - cluster unless one of the current applied clusters reach - the successful state or timeout. 
3) ProgressivePerGroup - means apply the workload to decisionGroup clusters progressively - per group. The workload will not be applied to the next - decisionGroup unless all clusters in the current group - reach the successful state or timeout. enum: - All - Progressive diff --git a/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go index 8fccc7e11..3c67e5986 100644 --- a/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go +++ b/pkg/addon/controllers/addonconfiguration/addon_configuration_reconciler_test.go @@ -116,8 +116,15 @@ func TestAddonConfigReconcile(t *testing.T) { SpecHash: "hash", }, }).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ - PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, - RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}, + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + RolloutStrategy: clusterv1alpha1.RolloutStrategy{ + Type: clusterv1alpha1.All, + All: &clusterv1alpha1.RolloutAll{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, + }, }).WithInstallProgression(addonv1alpha1.InstallProgression{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, ConfigReferences: []addonv1alpha1.InstallConfigReference{ @@ -190,8 +197,15 @@ func TestAddonConfigReconcile(t *testing.T) { SpecHash: "hash", }, }).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ - PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, - RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}, + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + RolloutStrategy: clusterv1alpha1.RolloutStrategy{ + Type: clusterv1alpha1.All, + All: &clusterv1alpha1.RolloutAll{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, + }, }).WithInstallProgression(addonv1alpha1.InstallProgression{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, ConfigReferences: []addonv1alpha1.InstallConfigReference{ @@ -262,8 +276,15 @@ func TestAddonConfigReconcile(t *testing.T) { ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}, }).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ - PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, - RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}, + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + RolloutStrategy: clusterv1alpha1.RolloutStrategy{ + Type: clusterv1alpha1.All, + All: &clusterv1alpha1.RolloutAll{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, + }, }).WithInstallProgression(addonv1alpha1.InstallProgression{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, ConfigReferences: []addonv1alpha1.InstallConfigReference{ @@ -324,8 +345,15 @@ func TestAddonConfigReconcile(t *testing.T) { ConfigGroupResource: addonv1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, DefaultConfig: &addonv1alpha1.ConfigReferent{Name: "test"}, }).WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ - PlacementRef: 
addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, - RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}, + PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, + RolloutStrategy: clusterv1alpha1.RolloutStrategy{ + Type: clusterv1alpha1.All, + All: &clusterv1alpha1.RolloutAll{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, + }, Configs: []addonv1alpha1.AddOnConfig{v1alpha1.AddOnConfig{ ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}}}, @@ -393,7 +421,14 @@ func TestAddonConfigReconcile(t *testing.T) { Configs: []addonv1alpha1.AddOnConfig{v1alpha1.AddOnConfig{ ConfigGroupResource: v1alpha1.ConfigGroupResource{Group: "core", Resource: "Foo"}, ConfigReferent: v1alpha1.ConfigReferent{Name: "test1"}}}, - RolloutStrategy: clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}, + RolloutStrategy: clusterv1alpha1.RolloutStrategy{ + Type: clusterv1alpha1.All, + All: &clusterv1alpha1.RolloutAll{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, + }, }).WithInstallProgression(addonv1alpha1.InstallProgression{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, ConfigReferences: []addonv1alpha1.InstallConfigReference{ @@ -436,8 +471,13 @@ func TestAddonConfigReconcile(t *testing.T) { clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, RolloutStrategy: clusterv1alpha1.RolloutStrategy{ - Type: clusterv1alpha1.Progressive, - Progressive: &clusterv1alpha1.RolloutProgressive{MaxConcurrency: intstr.FromInt(1)}}, + Type: clusterv1alpha1.Progressive, + Progressive: &clusterv1alpha1.RolloutProgressive{ + MaxConcurrency: intstr.FromInt(1), + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }}, }).WithInstallProgression(addonv1alpha1.InstallProgression{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, ConfigReferences: []addonv1alpha1.InstallConfigReference{ @@ -492,8 +532,13 @@ func TestAddonConfigReconcile(t *testing.T) { clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, RolloutStrategy: clusterv1alpha1.RolloutStrategy{ - Type: clusterv1alpha1.Progressive, - Progressive: &clusterv1alpha1.RolloutProgressive{MaxConcurrency: intstr.FromString("50%")}}, + Type: clusterv1alpha1.Progressive, + Progressive: &clusterv1alpha1.RolloutProgressive{ + MaxConcurrency: intstr.FromString("50%"), + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }}, }).WithInstallProgression(addonv1alpha1.InstallProgression{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, ConfigReferences: []addonv1alpha1.InstallConfigReference{ @@ -548,8 +593,12 @@ func TestAddonConfigReconcile(t *testing.T) { clusterManagementAddon: addontesting.NewClusterManagementAddon("test", "", "").WithPlacementStrategy(addonv1alpha1.PlacementStrategy{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, RolloutStrategy: clusterv1alpha1.RolloutStrategy{ - Type: 
clusterv1alpha1.Progressive, - Progressive: &clusterv1alpha1.RolloutProgressive{}}, + Type: clusterv1alpha1.Progressive, + Progressive: &clusterv1alpha1.RolloutProgressive{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }}, }).WithInstallProgression(addonv1alpha1.InstallProgression{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, ConfigReferences: []addonv1alpha1.InstallConfigReference{ @@ -635,6 +684,9 @@ func TestAddonConfigReconcile(t *testing.T) { {GroupName: "group1"}, }, }, + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, }, }}).WithInstallProgression(addonv1alpha1.InstallProgression{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, @@ -714,6 +766,11 @@ func TestAddonConfigReconcile(t *testing.T) { PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, RolloutStrategy: clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.ProgressivePerGroup, + ProgressivePerGroup: &clusterv1alpha1.RolloutProgressivePerGroup{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, }}).WithInstallProgression(addonv1alpha1.InstallProgression{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, ConfigReferences: []addonv1alpha1.InstallConfigReference{ @@ -799,6 +856,9 @@ func TestAddonConfigReconcile(t *testing.T) { {GroupName: "group1"}, }, }, + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, }, }}).WithInstallProgression(addonv1alpha1.InstallProgression{ PlacementRef: addonv1alpha1.PlacementRef{Name: "test-placement", Namespace: "default"}, diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go index c4c4771e8..e35cdd0fb 100644 --- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go +++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go @@ -222,7 +222,9 @@ func TestDeployWithRolloutStrategyReconcileAsExpected(t *testing.T) { perGoupeRollOut := clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.ProgressivePerGroup, ProgressivePerGroup: &clusterv1alpha1.RolloutProgressivePerGroup{ - Timeout: clusterv1alpha1.Timeout{Timeout: "None"}, + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, }, } mwrSet := helpertest.CreateTestManifestWorkReplicaSetWithRollOutStrategy("mwrSet-test", "default", @@ -358,13 +360,17 @@ func TestDeployWithMultiPlacementsReconcileAsExpected(t *testing.T) { perGoupeRollOut := clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.ProgressivePerGroup, ProgressivePerGroup: &clusterv1alpha1.RolloutProgressivePerGroup{ - Timeout: clusterv1alpha1.Timeout{Timeout: "None"}, + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, }, } allRollOut := clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.All, All: &clusterv1alpha1.RolloutAll{ - Timeout: clusterv1alpha1.Timeout{Timeout: "None"}, + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, }, } @@ -474,7 +480,9 @@ func TestDeployMWRSetSpecChangesReconcile(t *testing.T) { perGoupeRollOut := clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.ProgressivePerGroup, ProgressivePerGroup: &clusterv1alpha1.RolloutProgressivePerGroup{ - Timeout: 
clusterv1alpha1.Timeout{Timeout: "None"}, + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, }, } mwrSet := helpertest.CreateTestManifestWorkReplicaSetWithRollOutStrategy("mwrSet-test", "default", diff --git a/pkg/work/hub/test/helper.go b/pkg/work/hub/test/helper.go index c03dcb37c..34091158c 100644 --- a/pkg/work/hub/test/helper.go +++ b/pkg/work/hub/test/helper.go @@ -21,7 +21,9 @@ func CreateTestManifestWorkReplicaSet(name string, ns string, placementNames ... allRollOut := clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.All, All: &clusterv1alpha1.RolloutAll{ - Timeout: clusterv1alpha1.Timeout{Timeout: "None"}, + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, }, } diff --git a/test/e2e/manifestworkreplicaset_test.go b/test/e2e/manifestworkreplicaset_test.go index 107280311..4b4c5a054 100644 --- a/test/e2e/manifestworkreplicaset_test.go +++ b/test/e2e/manifestworkreplicaset_test.go @@ -59,7 +59,9 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() { RolloutStrategy: clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.All, All: &clusterv1alpha1.RolloutAll{ - Timeout: clusterv1alpha1.Timeout{Timeout: "None"}, + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, }, }, } @@ -275,7 +277,9 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() { RolloutStrategy: clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.All, All: &clusterv1alpha1.RolloutAll{ - Timeout: clusterv1alpha1.Timeout{Timeout: "None"}, + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, }, }, } diff --git a/test/integration/addon/addon_configs_test.go b/test/integration/addon/addon_configs_test.go index 2713ef279..464e1d0db 100644 --- a/test/integration/addon/addon_configs_test.go +++ b/test/integration/addon/addon_configs_test.go @@ -154,6 +154,11 @@ var _ = ginkgo.Describe("AddConfigs", func() { }, RolloutStrategy: clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.All, + All: &clusterv1alpha1.RolloutAll{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, }, }, }, diff --git a/test/integration/addon/addon_manager_install_test.go b/test/integration/addon/addon_manager_install_test.go index 9e00b34d3..26965b4fe 100644 --- a/test/integration/addon/addon_manager_install_test.go +++ b/test/integration/addon/addon_manager_install_test.go @@ -115,6 +115,11 @@ var _ = ginkgo.Describe("Agent deploy", func() { PlacementRef: addonapiv1alpha1.PlacementRef{Name: "test-placement", Namespace: placementNamespace}, RolloutStrategy: clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.All, + All: &clusterv1alpha1.RolloutAll{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, }, }, }, diff --git a/test/integration/addon/addon_manager_upgrade_test.go b/test/integration/addon/addon_manager_upgrade_test.go index caa4ce23b..b8d8f2d91 100644 --- a/test/integration/addon/addon_manager_upgrade_test.go +++ b/test/integration/addon/addon_manager_upgrade_test.go @@ -97,6 +97,11 @@ var _ = ginkgo.Describe("Addon upgrade", func() { PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementName, Namespace: placementNamespace}, RolloutStrategy: clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.All, + All: &clusterv1alpha1.RolloutAll{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, }, Configs: []addonapiv1alpha1.AddOnConfig{ { @@ -365,8 +370,13 @@ var _ = ginkgo.Describe("Addon 
upgrade", func() { cma, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), cma.Name, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) cma.Spec.InstallStrategy.Placements[0].RolloutStrategy = clusterv1alpha1.RolloutStrategy{ - Type: clusterv1alpha1.Progressive, - Progressive: &clusterv1alpha1.RolloutProgressive{MaxConcurrency: intstr.FromInt(2)}, + Type: clusterv1alpha1.Progressive, + Progressive: &clusterv1alpha1.RolloutProgressive{ + MaxConcurrency: intstr.FromInt(2), + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, } cma.Spec.InstallStrategy.Placements[0].Configs[0].ConfigReferent = addonapiv1alpha1.ConfigReferent{Namespace: configDefaultNamespace, Name: configUpdateName} patchClusterManagementAddOn(context.Background(), cma) @@ -615,6 +625,11 @@ var _ = ginkgo.Describe("Addon upgrade", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) cma.Spec.InstallStrategy.Placements[0].RolloutStrategy = clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.ProgressivePerGroup, + ProgressivePerGroup: &clusterv1alpha1.RolloutProgressivePerGroup{ + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, + }, } patchClusterManagementAddOn(context.Background(), cma) diff --git a/test/integration/work/manifestworkreplicaset_test.go b/test/integration/work/manifestworkreplicaset_test.go index 914cb094c..123d820e6 100644 --- a/test/integration/work/manifestworkreplicaset_test.go +++ b/test/integration/work/manifestworkreplicaset_test.go @@ -61,7 +61,9 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { RolloutStrategy: clusterv1alpha1.RolloutStrategy{ Type: clusterv1alpha1.All, All: &clusterv1alpha1.RolloutAll{ - Timeout: clusterv1alpha1.Timeout{Timeout: "None"}, + RolloutConfig: clusterv1alpha1.RolloutConfig{ + ProgressDeadline: "None", + }, }, }, } diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml deleted file mode 100644 index 571116cc3..000000000 --- a/vendor/go.uber.org/atomic/.codecov.yml +++ /dev/null @@ -1,19 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - -# Also update COVER_IGNORE_PKGS in the Makefile. -ignore: - - /internal/gen-atomicint/ - - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore deleted file mode 100644 index 2e337a0ed..000000000 --- a/vendor/go.uber.org/atomic/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -/bin -.DS_Store -/vendor -cover.html -cover.out -lint.log - -# Binaries -*.test - -# Profiling output -*.prof - -# Output of fossa analyzer -/fossa diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md deleted file mode 100644 index 5fe03f21b..000000000 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ /dev/null @@ -1,117 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. 
- -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [1.10.0] - 2022-08-11 -### Added -- Add `atomic.Float32` type for atomic operations on `float32`. -- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`, - and `atomic.Value`. -- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any - type. This is present only for Go 1.18 or higher, and is a drop-in for - replacement for the standard library's `sync/atomic.Pointer` type. - -### Changed -- Deprecate `CAS` methods on all types in favor of corresponding - `CompareAndSwap` methods. - -Thanks to @eNV25 and @icpd for their contributions to this release. - -[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0 - -## [1.9.0] - 2021-07-15 -### Added -- Add `Float64.Swap` to match int atomic operations. -- Add `atomic.Time` type for atomic operations on `time.Time` values. - -[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0 - -## [1.8.0] - 2021-06-09 -### Added -- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. -- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. - -[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 - -## [1.7.0] - 2020-09-14 -### Added -- Support JSON serialization and deserialization of primitive atomic types. -- Support Text marshalling and unmarshalling for string atomics. - -### Changed -- Disallow incorrect comparison of atomic values in a non-atomic way. - -### Removed -- Remove dependency on `golang.org/x/{lint, tools}`. - -[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 - -## [1.6.0] - 2020-02-24 -### Changed -- Drop library dependency on `golang.org/x/{lint, tools}`. - -[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 - -## [1.5.1] - 2019-11-19 -- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together - causing `CAS` to fail even though the old value matches. - -[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 - -## [1.5.0] - 2019-10-29 -### Changed -- With Go modules, only the `go.uber.org/atomic` import path is supported now. - If you need to use the old import path, please add a `replace` directive to - your `go.mod`. - -[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 - -## [1.4.0] - 2019-05-01 -### Added - - Add `atomic.Error` type for atomic operations on `error` values. - -[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 - -## [1.3.2] - 2018-05-02 -### Added -- Add `atomic.Duration` type for atomic operations on `time.Duration` values. - -[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 - -## [1.3.1] - 2017-11-14 -### Fixed -- Revert optimization for `atomic.String.Store("")` which caused data races. - -[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 - -## [1.3.0] - 2017-11-13 -### Added -- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. - -### Changed -- Optimize `atomic.String.Store("")` by avoiding an allocation. - -[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 - -## [1.2.0] - 2017-04-12 -### Added -- Shadow `atomic.Value` from `sync/atomic`. - -[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 - -## [1.1.0] - 2017-03-10 -### Added -- Add atomic `Float64` type. - -### Changed -- Support new `go.uber.org/atomic` import path. 
- -[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 - -## [1.0.0] - 2016-07-18 - -- Initial release. - -[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt deleted file mode 100644 index 8765c9fbc..000000000 --- a/vendor/go.uber.org/atomic/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile deleted file mode 100644 index 46c945b32..000000000 --- a/vendor/go.uber.org/atomic/Makefile +++ /dev/null @@ -1,79 +0,0 @@ -# Directory to place `go install`ed binaries into. -export GOBIN ?= $(shell pwd)/bin - -GOLINT = $(GOBIN)/golint -GEN_ATOMICINT = $(GOBIN)/gen-atomicint -GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper -STATICCHECK = $(GOBIN)/staticcheck - -GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) - -# Also update ignore section in .codecov.yml. -COVER_IGNORE_PKGS = \ - go.uber.org/atomic/internal/gen-atomicint \ - go.uber.org/atomic/internal/gen-atomicwrapper - -.PHONY: build -build: - go build ./... - -.PHONY: test -test: - go test -race ./... - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck - -$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) - go build -o $@ ./internal/gen-atomicwrapper - -$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) - go build -o $@ ./internal/gen-atomicint - -.PHONY: golint -golint: $(GOLINT) - $(GOLINT) ./... - -.PHONY: staticcheck -staticcheck: $(STATICCHECK) - $(STATICCHECK) ./... - -.PHONY: lint -lint: gofmt golint staticcheck generatenodirty - -# comma separated list of packages to consider for code coverage. -COVER_PKG = $(shell \ - go list -find ./... | \ - grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ - paste -sd, -) - -.PHONY: cover -cover: - go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... - go tool cover -html=cover.out -o cover.html - -.PHONY: generate -generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) - go generate ./... 
- -.PHONY: generatenodirty -generatenodirty: - @[ -z "$$(git status --porcelain)" ] || ( \ - echo "Working tree is dirty. Commit your changes first."; \ - git status; \ - exit 1 ) - @make generate - @status=$$(git status --porcelain); \ - [ -z "$$status" ] || ( \ - echo "Working tree is dirty after `make generate`:"; \ - echo "$$status"; \ - echo "Please ensure that the generated code is up-to-date." ) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md deleted file mode 100644 index 96b47a1f1..000000000 --- a/vendor/go.uber.org/atomic/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] - -Simple wrappers for primitive types to enforce atomic access. - -## Installation - -```shell -$ go get -u go.uber.org/atomic@v1 -``` - -### Legacy Import Path - -As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way -of using this package. If you are using Go modules, this package will fail to -compile with the legacy import path path `github.com/uber-go/atomic`. - -We recommend migrating your code to the new import path but if you're unable -to do so, or if your dependencies are still using the old import path, you -will have to add a `replace` directive to your `go.mod` file downgrading the -legacy import path to an older version. - -``` -replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 -``` - -You can do so automatically by running the following command. - -```shell -$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 -``` - -## Usage - -The standard library's `sync/atomic` is powerful, but it's easy to forget which -variables must be accessed atomically. `go.uber.org/atomic` preserves all the -functionality of the standard library, but wraps the primitive types to -provide a safer, more convenient API. - -```go -var atom atomic.Uint32 -atom.Store(42) -atom.Sub(2) -atom.CAS(40, 11) -``` - -See the [documentation][doc] for a complete API specification. - -## Development Status - -Stable. - ---- - -Released under the [MIT License](LICENSE.txt). - -[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg -[doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml -[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/atomic -[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic -[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go deleted file mode 100644 index dfa2085f4..000000000 --- a/vendor/go.uber.org/atomic/bool.go +++ /dev/null @@ -1,88 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" -) - -// Bool is an atomic type-safe wrapper for bool values. -type Bool struct { - _ nocmp // disallow non-atomic comparison - - v Uint32 -} - -var _zeroBool bool - -// NewBool creates a new Bool. -func NewBool(val bool) *Bool { - x := &Bool{} - if val != _zeroBool { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped bool. -func (x *Bool) Load() bool { - return truthy(x.v.Load()) -} - -// Store atomically stores the passed bool. -func (x *Bool) Store(val bool) { - x.v.Store(boolToInt(val)) -} - -// CAS is an atomic compare-and-swap for bool values. -// -// Deprecated: Use CompareAndSwap. -func (x *Bool) CAS(old, new bool) (swapped bool) { - return x.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap for bool values. -func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) { - return x.v.CompareAndSwap(boolToInt(old), boolToInt(new)) -} - -// Swap atomically stores the given bool and returns the old -// value. -func (x *Bool) Swap(val bool) (old bool) { - return truthy(x.v.Swap(boolToInt(val))) -} - -// MarshalJSON encodes the wrapped bool into JSON. -func (x *Bool) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a bool from JSON. -func (x *Bool) UnmarshalJSON(b []byte) error { - var v bool - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go deleted file mode 100644 index ae7390ee6..000000000 --- a/vendor/go.uber.org/atomic/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go deleted file mode 100644 index 6f4157445..000000000 --- a/vendor/go.uber.org/atomic/duration.go +++ /dev/null @@ -1,89 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "time" -) - -// Duration is an atomic type-safe wrapper for time.Duration values. -type Duration struct { - _ nocmp // disallow non-atomic comparison - - v Int64 -} - -var _zeroDuration time.Duration - -// NewDuration creates a new Duration. -func NewDuration(val time.Duration) *Duration { - x := &Duration{} - if val != _zeroDuration { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped time.Duration. -func (x *Duration) Load() time.Duration { - return time.Duration(x.v.Load()) -} - -// Store atomically stores the passed time.Duration. -func (x *Duration) Store(val time.Duration) { - x.v.Store(int64(val)) -} - -// CAS is an atomic compare-and-swap for time.Duration values. -// -// Deprecated: Use CompareAndSwap. -func (x *Duration) CAS(old, new time.Duration) (swapped bool) { - return x.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap for time.Duration values. -func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) { - return x.v.CompareAndSwap(int64(old), int64(new)) -} - -// Swap atomically stores the given time.Duration and returns the old -// value. -func (x *Duration) Swap(val time.Duration) (old time.Duration) { - return time.Duration(x.v.Swap(int64(val))) -} - -// MarshalJSON encodes the wrapped time.Duration into JSON. -func (x *Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a time.Duration from JSON. 
-func (x *Duration) UnmarshalJSON(b []byte) error { - var v time.Duration - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go deleted file mode 100644 index 4c18b0a9e..000000000 --- a/vendor/go.uber.org/atomic/duration_ext.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "time" - -//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go - -// Add atomically adds to the wrapped time.Duration and returns the new value. -func (d *Duration) Add(delta time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(delta))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(delta time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(delta))) -} - -// String encodes the wrapped value as a string. -func (d *Duration) String() string { - return d.Load().String() -} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go deleted file mode 100644 index 27b23ea16..000000000 --- a/vendor/go.uber.org/atomic/error.go +++ /dev/null @@ -1,62 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// Error is an atomic type-safe wrapper for error values. -type Error struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroError error - -// NewError creates a new Error. -func NewError(val error) *Error { - x := &Error{} - if val != _zeroError { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped error. -func (x *Error) Load() error { - return unpackError(x.v.Load()) -} - -// Store atomically stores the passed error. -func (x *Error) Store(val error) { - x.v.Store(packError(val)) -} - -// CompareAndSwap is an atomic compare-and-swap for error values. -func (x *Error) CompareAndSwap(old, new error) (swapped bool) { - return x.v.CompareAndSwap(packError(old), packError(new)) -} - -// Swap atomically stores the given error and returns the old -// value. -func (x *Error) Swap(val error) (old error) { - return unpackError(x.v.Swap(packError(val))) -} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go deleted file mode 100644 index d31fb633b..000000000 --- a/vendor/go.uber.org/atomic/error_ext.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// atomic.Value panics on nil inputs, or if the underlying type changes. -// Stabilize by always storing a custom struct that we control. - -//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go - -type packedError struct{ Value error } - -func packError(v error) interface{} { - return packedError{v} -} - -func unpackError(v interface{}) error { - if err, ok := v.(packedError); ok { - return err.Value - } - return nil -} diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go deleted file mode 100644 index 5d535a6d2..000000000 --- a/vendor/go.uber.org/atomic/float32.go +++ /dev/null @@ -1,77 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "math" -) - -// Float32 is an atomic type-safe wrapper for float32 values. -type Float32 struct { - _ nocmp // disallow non-atomic comparison - - v Uint32 -} - -var _zeroFloat32 float32 - -// NewFloat32 creates a new Float32. -func NewFloat32(val float32) *Float32 { - x := &Float32{} - if val != _zeroFloat32 { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped float32. -func (x *Float32) Load() float32 { - return math.Float32frombits(x.v.Load()) -} - -// Store atomically stores the passed float32. -func (x *Float32) Store(val float32) { - x.v.Store(math.Float32bits(val)) -} - -// Swap atomically stores the given float32 and returns the old -// value. -func (x *Float32) Swap(val float32) (old float32) { - return math.Float32frombits(x.v.Swap(math.Float32bits(val))) -} - -// MarshalJSON encodes the wrapped float32 into JSON. -func (x *Float32) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a float32 from JSON. -func (x *Float32) UnmarshalJSON(b []byte) error { - var v float32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go deleted file mode 100644 index b0cd8d9c8..000000000 --- a/vendor/go.uber.org/atomic/float32_ext.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "math" - "strconv" -) - -//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go - -// Add atomically adds to the wrapped float32 and returns the new value. -func (f *Float32) Add(delta float32) float32 { - for { - old := f.Load() - new := old + delta - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float32 and returns the new value. -func (f *Float32) Sub(delta float32) float32 { - return f.Add(-delta) -} - -// CAS is an atomic compare-and-swap for float32 values. -// -// Deprecated: Use CompareAndSwap -func (f *Float32) CAS(old, new float32) (swapped bool) { - return f.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap for float32 values. -// -// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators -// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. -// This avoids typical CompareAndSwap loops from blocking forever, e.g., -// -// for { -// old := atom.Load() -// new = f(old) -// if atom.CompareAndSwap(old, new) { -// break -// } -// } -// -// If CompareAndSwap did not match NaN to match, then the above would loop forever. -func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) { - return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new)) -} - -// String encodes the wrapped value as a string. -func (f *Float32) String() string { - // 'g' is the behavior for floats with %v. - return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32) -} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go deleted file mode 100644 index 11d5189a5..000000000 --- a/vendor/go.uber.org/atomic/float64.go +++ /dev/null @@ -1,77 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "math" -) - -// Float64 is an atomic type-safe wrapper for float64 values. 
-type Float64 struct { - _ nocmp // disallow non-atomic comparison - - v Uint64 -} - -var _zeroFloat64 float64 - -// NewFloat64 creates a new Float64. -func NewFloat64(val float64) *Float64 { - x := &Float64{} - if val != _zeroFloat64 { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped float64. -func (x *Float64) Load() float64 { - return math.Float64frombits(x.v.Load()) -} - -// Store atomically stores the passed float64. -func (x *Float64) Store(val float64) { - x.v.Store(math.Float64bits(val)) -} - -// Swap atomically stores the given float64 and returns the old -// value. -func (x *Float64) Swap(val float64) (old float64) { - return math.Float64frombits(x.v.Swap(math.Float64bits(val))) -} - -// MarshalJSON encodes the wrapped float64 into JSON. -func (x *Float64) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a float64 from JSON. -func (x *Float64) UnmarshalJSON(b []byte) error { - var v float64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go deleted file mode 100644 index 48c52b0ab..000000000 --- a/vendor/go.uber.org/atomic/float64_ext.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "math" - "strconv" -) - -//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(delta float64) float64 { - for { - old := f.Load() - new := old + delta - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(delta float64) float64 { - return f.Add(-delta) -} - -// CAS is an atomic compare-and-swap for float64 values. -// -// Deprecated: Use CompareAndSwap -func (f *Float64) CAS(old, new float64) (swapped bool) { - return f.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap for float64 values. -// -// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators -// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. 
-// This avoids typical CompareAndSwap loops from blocking forever, e.g., -// -// for { -// old := atom.Load() -// new = f(old) -// if atom.CompareAndSwap(old, new) { -// break -// } -// } -// -// If CompareAndSwap did not match NaN to match, then the above would loop forever. -func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) { - return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new)) -} - -// String encodes the wrapped value as a string. -func (f *Float64) String() string { - // 'g' is the behavior for floats with %v. - return strconv.FormatFloat(f.Load(), 'g', -1, 64) -} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go deleted file mode 100644 index 1e9ef4f87..000000000 --- a/vendor/go.uber.org/atomic/gen.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go -//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go -//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go -//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go -//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go deleted file mode 100644 index b9a68f42c..000000000 --- a/vendor/go.uber.org/atomic/int32.go +++ /dev/null @@ -1,109 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int32 is an atomic wrapper around int32. -type Int32 struct { - _ nocmp // disallow non-atomic comparison - - v int32 -} - -// NewInt32 creates a new Int32. -func NewInt32(val int32) *Int32 { - return &Int32{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(delta int32) int32 { - return atomic.AddInt32(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(delta int32) int32 { - return atomic.AddInt32(&i.v, -delta) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap. -func (i *Int32) CAS(old, new int32) (swapped bool) { - return i.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(val int32) { - atomic.StoreInt32(&i.v, val) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(val int32) (old int32) { - return atomic.SwapInt32(&i.v, val) -} - -// MarshalJSON encodes the wrapped int32 into JSON. -func (i *Int32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int32. -func (i *Int32) UnmarshalJSON(b []byte) error { - var v int32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Int32) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go deleted file mode 100644 index 78d260976..000000000 --- a/vendor/go.uber.org/atomic/int64.go +++ /dev/null @@ -1,109 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int64 is an atomic wrapper around int64. -type Int64 struct { - _ nocmp // disallow non-atomic comparison - - v int64 -} - -// NewInt64 creates a new Int64. -func NewInt64(val int64) *Int64 { - return &Int64{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(delta int64) int64 { - return atomic.AddInt64(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(delta int64) int64 { - return atomic.AddInt64(&i.v, -delta) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap. -func (i *Int64) CAS(old, new int64) (swapped bool) { - return i.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(val int64) { - atomic.StoreInt64(&i.v, val) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(val int64) (old int64) { - return atomic.SwapInt64(&i.v, val) -} - -// MarshalJSON encodes the wrapped int64 into JSON. -func (i *Int64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int64. -func (i *Int64) UnmarshalJSON(b []byte) error { - var v int64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Int64) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go deleted file mode 100644 index 54b74174a..000000000 --- a/vendor/go.uber.org/atomic/nocmp.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// nocmp is an uncomparable struct. Embed this inside another struct to make -// it uncomparable. -// -// type Foo struct { -// nocmp -// // ... -// } -// -// This DOES NOT: -// -// - Disallow shallow copies of structs -// - Disallow comparison of pointers to uncomparable structs -type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go deleted file mode 100644 index e0f47dba4..000000000 --- a/vendor/go.uber.org/atomic/pointer_go118.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:build go1.18 && !go1.19 -// +build go1.18,!go1.19 - -package atomic - -import "unsafe" - -type Pointer[T any] struct { - _ nocmp // disallow non-atomic comparison - p UnsafePointer -} - -// NewPointer creates a new Pointer. -func NewPointer[T any](v *T) *Pointer[T] { - var p Pointer[T] - if v != nil { - p.p.Store(unsafe.Pointer(v)) - } - return &p -} - -// Load atomically loads the wrapped value. -func (p *Pointer[T]) Load() *T { - return (*T)(p.p.Load()) -} - -// Store atomically stores the passed value. -func (p *Pointer[T]) Store(val *T) { - p.p.Store(unsafe.Pointer(val)) -} - -// Swap atomically swaps the wrapped pointer and returns the old value. -func (p *Pointer[T]) Swap(val *T) (old *T) { - return (*T)(p.p.Swap(unsafe.Pointer(val))) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { - return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) -} diff --git a/vendor/go.uber.org/atomic/pointer_go119.go b/vendor/go.uber.org/atomic/pointer_go119.go deleted file mode 100644 index 6726f17ad..000000000 --- a/vendor/go.uber.org/atomic/pointer_go119.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2022 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:build go1.19 -// +build go1.19 - -package atomic - -import "sync/atomic" - -// Pointer is an atomic pointer of type *T. -type Pointer[T any] struct { - _ nocmp // disallow non-atomic comparison - p atomic.Pointer[T] -} - -// NewPointer creates a new Pointer. -func NewPointer[T any](v *T) *Pointer[T] { - var p Pointer[T] - if v != nil { - p.p.Store(v) - } - return &p -} - -// Load atomically loads the wrapped value. -func (p *Pointer[T]) Load() *T { - return p.p.Load() -} - -// Store atomically stores the passed value. -func (p *Pointer[T]) Store(val *T) { - p.p.Store(val) -} - -// Swap atomically swaps the wrapped pointer and returns the old value. -func (p *Pointer[T]) Swap(val *T) (old *T) { - return p.p.Swap(val) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { - return p.p.CompareAndSwap(old, new) -} diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go deleted file mode 100644 index c4bea70f4..000000000 --- a/vendor/go.uber.org/atomic/string.go +++ /dev/null @@ -1,65 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// String is an atomic type-safe wrapper for string values. 
-type String struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroString string - -// NewString creates a new String. -func NewString(val string) *String { - x := &String{} - if val != _zeroString { - x.Store(val) - } - return x -} - -// Load atomically loads the wrapped string. -func (x *String) Load() string { - if v := x.v.Load(); v != nil { - return v.(string) - } - return _zeroString -} - -// Store atomically stores the passed string. -func (x *String) Store(val string) { - x.v.Store(val) -} - -// CompareAndSwap is an atomic compare-and-swap for string values. -func (x *String) CompareAndSwap(old, new string) (swapped bool) { - return x.v.CompareAndSwap(old, new) -} - -// Swap atomically stores the given string and returns the old -// value. -func (x *String) Swap(val string) (old string) { - return x.v.Swap(val).(string) -} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go deleted file mode 100644 index 1f63dfd5b..000000000 --- a/vendor/go.uber.org/atomic/string_ext.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go - -// String returns the wrapped value. -func (s *String) String() string { - return s.Load() -} - -// MarshalText encodes the wrapped string into a textual form. -// -// This makes it encodable as JSON, YAML, XML, and more. -func (s *String) MarshalText() ([]byte, error) { - return []byte(s.Load()), nil -} - -// UnmarshalText decodes text and replaces the wrapped string with it. -// -// This makes it decodable from JSON, YAML, XML, and more. -func (s *String) UnmarshalText(b []byte) error { - s.Store(string(b)) - return nil -} diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go deleted file mode 100644 index 1e3dc978a..000000000 --- a/vendor/go.uber.org/atomic/time_ext.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "time" - -//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go - -func packTime(t time.Time) interface{} { - return t -} - -func unpackTime(v interface{}) time.Time { - if t, ok := v.(time.Time); ok { - return t - } - return time.Time{} -} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go deleted file mode 100644 index d6f04a96d..000000000 --- a/vendor/go.uber.org/atomic/uint32.go +++ /dev/null @@ -1,109 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint32 is an atomic wrapper around uint32. -type Uint32 struct { - _ nocmp // disallow non-atomic comparison - - v uint32 -} - -// NewUint32 creates a new Uint32. -func NewUint32(val uint32) *Uint32 { - return &Uint32{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(delta uint32) uint32 { - return atomic.AddUint32(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. 
-func (i *Uint32) Sub(delta uint32) uint32 { - return atomic.AddUint32(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap. -func (i *Uint32) CAS(old, new uint32) (swapped bool) { - return i.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(val uint32) { - atomic.StoreUint32(&i.v, val) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(val uint32) (old uint32) { - return atomic.SwapUint32(&i.v, val) -} - -// MarshalJSON encodes the wrapped uint32 into JSON. -func (i *Uint32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint32. -func (i *Uint32) UnmarshalJSON(b []byte) error { - var v uint32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint32) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go deleted file mode 100644 index 2574bdd5e..000000000 --- a/vendor/go.uber.org/atomic/uint64.go +++ /dev/null @@ -1,109 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint64 is an atomic wrapper around uint64. -type Uint64 struct { - _ nocmp // disallow non-atomic comparison - - v uint64 -} - -// NewUint64 creates a new Uint64. -func NewUint64(val uint64) *Uint64 { - return &Uint64{v: val} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. 
-func (i *Uint64) Add(delta uint64) uint64 { - return atomic.AddUint64(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. -func (i *Uint64) Sub(delta uint64) uint64 { - return atomic.AddUint64(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap. -func (i *Uint64) CAS(old, new uint64) (swapped bool) { - return i.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(val uint64) { - atomic.StoreUint64(&i.v, val) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. -func (i *Uint64) Swap(val uint64) (old uint64) { - return atomic.SwapUint64(&i.v, val) -} - -// MarshalJSON encodes the wrapped uint64 into JSON. -func (i *Uint64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint64. -func (i *Uint64) UnmarshalJSON(b []byte) error { - var v uint64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint64) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go deleted file mode 100644 index 81b275a7a..000000000 --- a/vendor/go.uber.org/atomic/uintptr.go +++ /dev/null @@ -1,109 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uintptr is an atomic wrapper around uintptr. -type Uintptr struct { - _ nocmp // disallow non-atomic comparison - - v uintptr -} - -// NewUintptr creates a new Uintptr. -func NewUintptr(val uintptr) *Uintptr { - return &Uintptr{v: val} -} - -// Load atomically loads the wrapped value. 
-func (i *Uintptr) Load() uintptr { - return atomic.LoadUintptr(&i.v) -} - -// Add atomically adds to the wrapped uintptr and returns the new value. -func (i *Uintptr) Add(delta uintptr) uintptr { - return atomic.AddUintptr(&i.v, delta) -} - -// Sub atomically subtracts from the wrapped uintptr and returns the new value. -func (i *Uintptr) Sub(delta uintptr) uintptr { - return atomic.AddUintptr(&i.v, ^(delta - 1)) -} - -// Inc atomically increments the wrapped uintptr and returns the new value. -func (i *Uintptr) Inc() uintptr { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uintptr and returns the new value. -func (i *Uintptr) Dec() uintptr { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap. -func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { - return i.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) { - return atomic.CompareAndSwapUintptr(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uintptr) Store(val uintptr) { - atomic.StoreUintptr(&i.v, val) -} - -// Swap atomically swaps the wrapped uintptr and returns the old value. -func (i *Uintptr) Swap(val uintptr) (old uintptr) { - return atomic.SwapUintptr(&i.v, val) -} - -// MarshalJSON encodes the wrapped uintptr into JSON. -func (i *Uintptr) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uintptr. -func (i *Uintptr) UnmarshalJSON(b []byte) error { - var v uintptr - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uintptr) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go deleted file mode 100644 index 34868baf6..000000000 --- a/vendor/go.uber.org/atomic/unsafe_pointer.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2021-2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "sync/atomic" - "unsafe" -) - -// UnsafePointer is an atomic wrapper around unsafe.Pointer. -type UnsafePointer struct { - _ nocmp // disallow non-atomic comparison - - v unsafe.Pointer -} - -// NewUnsafePointer creates a new UnsafePointer. 
-func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer { - return &UnsafePointer{v: val} -} - -// Load atomically loads the wrapped value. -func (p *UnsafePointer) Load() unsafe.Pointer { - return atomic.LoadPointer(&p.v) -} - -// Store atomically stores the passed value. -func (p *UnsafePointer) Store(val unsafe.Pointer) { - atomic.StorePointer(&p.v, val) -} - -// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. -func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { - return atomic.SwapPointer(&p.v, val) -} - -// CAS is an atomic compare-and-swap. -// -// Deprecated: Use CompareAndSwap -func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { - return p.CompareAndSwap(old, new) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) { - return atomic.CompareAndSwapPointer(&p.v, old, new) -} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go deleted file mode 100644 index 52caedb9a..000000000 --- a/vendor/go.uber.org/atomic/value.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "sync/atomic" - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct { - _ nocmp // disallow non-atomic comparison - - atomic.Value -} diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml new file mode 100644 index 000000000..fbc6df790 --- /dev/null +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -0,0 +1,77 @@ +output: + # Make output more digestible with quickfix in vim/emacs/etc. + sort-results: true + print-issued-lines: false + +linters: + # We'll track the golangci-lint default linters manually + # instead of letting them change without our control. + disable-all: true + enable: + # golangci-lint defaults: + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - unused + + # Our own extras: + - gofmt + - nolintlint # lints nolint directives + - revive + +linters-settings: + govet: + # These govet checks are disabled by default, but they're useful. + enable: + - niliness + - reflectvaluecompare + - sortslice + - unusedwrite + + errcheck: + exclude-functions: + # These methods can not fail. + # They operate on an in-memory buffer. 
+ - (*go.uber.org/zap/buffer.Buffer).Write + - (*go.uber.org/zap/buffer.Buffer).WriteByte + - (*go.uber.org/zap/buffer.Buffer).WriteString + + - (*go.uber.org/zap/zapio.Writer).Close + - (*go.uber.org/zap/zapio.Writer).Sync + - (*go.uber.org/zap/zapio.Writer).Write + # Write to zapio.Writer cannot fail, + # so io.WriteString on it cannot fail. + - io.WriteString(*go.uber.org/zap/zapio.Writer) + + # Writing a plain string to a fmt.State cannot fail. + - io.WriteString(fmt.State) + +issues: + # Print all issues reported by all linters. + max-issues-per-linter: 0 + max-same-issues: 0 + + # Don't ignore some of the issues that golangci-lint considers okay. + # This includes documenting all exported entities. + exclude-use-default: false + + exclude-rules: + # Don't warn on unused parameters. + # Parameter names are useful; replacing them with '_' is undesirable. + - linters: [revive] + text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _' + + # staticcheck already has smarter checks for empty blocks. + # revive's empty-block linter has false positives. + # For example, as of writing this, the following is not allowed. + # for foo() { } + - linters: [revive] + text: 'empty-block: this block is empty, you can remove it' + + # Ignore logger.Sync() errcheck failures in example_test.go + # since those are intended to be uncomplicated examples. + - linters: [errcheck] + path: example_test.go + text: 'Error return value of `logger.Sync` is not checked' diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 0db1f9f15..11b465976 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,7 +1,39 @@ # Changelog All notable changes to this project will be documented in this file. -This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## 1.26.0 (14 Sep 2023) +Enhancements: +* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured +context. +* [#1350][]: String encoding is much (~50%) faster now. + +Thanks to @jquirke, @cdvr1993 for their contributions to this release. + +[#1319]: https://github.com/uber-go/zap/pull/1319 +[#1350]: https://github.com/uber-go/zap/pull/1350 + +## 1.25.0 (1 Aug 2023) + +This release contains several improvements including performance, API additions, +and two new experimental packages whose APIs are unstable and may change in the +future. + +Enhancements: +* [#1246][]: Add `zap/exp/zapslog` package for integration with slog. +* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. +* [#1281][]: Add `zap/exp/expfield` package which contains helper methods +`Str` and `Strs` for constructing String-like zap.Fields. +* [#1310][]: Reduce stack size on `Any`. + +Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions +to this release. 
+ +[#1246]: https://github.com/uber-go/zap/pull/1246 +[#1273]: https://github.com/uber-go/zap/pull/1273 +[#1281]: https://github.com/uber-go/zap/pull/1281 +[#1310]: https://github.com/uber-go/zap/pull/1310 ## 1.24.0 (30 Nov 2022) @@ -27,7 +59,6 @@ Enhancements: [#1147]: https://github.com/uber-go/zap/pull/1147 [#1155]: https://github.com/uber-go/zap/pull/1155 - ## 1.22.0 (8 Aug 2022) Enhancements: @@ -176,6 +207,16 @@ Enhancements: Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 + ## 1.16.0 (1 Sep 2020) Bugfixes: @@ -197,6 +238,17 @@ Enhancements: Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. +[#629]: https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 + ## 1.15.0 (23 Apr 2020) Bugfixes: @@ -213,6 +265,11 @@ Enhancements: Thanks to @danielbprice for their contributions to this release. +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 + ## 1.14.1 (14 Mar 2020) Bugfixes: @@ -225,6 +282,10 @@ Bugfixes: Thanks to @YashishDua for their contributions to this release. +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 + ## 1.14.0 (20 Feb 2020) Enhancements: @@ -235,6 +296,11 @@ Enhancements: Thanks to @caibirdme for their contributions to this release. +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 + ## 1.13.0 (13 Nov 2019) Enhancements: @@ -243,11 +309,15 @@ Enhancements: Thanks to @jbizzle for their contributions to this release. +[#758]: https://github.com/uber-go/zap/pull/758 + ## 1.12.0 (29 Oct 2019) Enhancements: * [#751][]: Migrate to Go modules. +[#751]: https://github.com/uber-go/zap/pull/751 + ## 1.11.0 (21 Oct 2019) Enhancements: @@ -256,6 +326,9 @@ Enhancements: Thanks to @juicemia, @uhthomas for their contributions to this release. +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 + ## 1.10.0 (29 Apr 2019) Bugfixes: @@ -273,12 +346,20 @@ Enhancements: Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions to this release. 
+[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 + ## v1.9.1 (06 Aug 2018) Bugfixes: * [#614][]: MapObjectEncoder should not ignore empty slices. +[#614]: https://github.com/uber-go/zap/pull/614 + ## v1.9.0 (19 Jul 2018) Enhancements: @@ -288,6 +369,10 @@ Enhancements: Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and @dimroc for their contributions to this release. +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 + ## v1.8.0 (13 Apr 2018) Enhancements: @@ -301,11 +386,18 @@ Bugfixes: Thanks to @DiSiqueira and @djui for their contributions to this release. +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 + ## v1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. +[#504]: https://github.com/uber-go/zap/pull/504 + ## v1.7.0 (21 Sep 2017) Enhancements: @@ -313,6 +405,8 @@ Enhancements: * [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user to specify the level of the logged messages. +[#487]: https://github.com/uber-go/zap/pull/487 + ## v1.6.0 (30 Aug 2017) Enhancements: @@ -321,6 +415,9 @@ Enhancements: * [#490][]: Add a `ContextMap` method to observer logs for simpler field validation in tests. +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 + ## v1.5.0 (22 Jul 2017) Enhancements: @@ -334,6 +431,11 @@ Bugfixes: Thanks to @richard-tunein and @pavius for their contributions to this release. +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 + ## v1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -343,6 +445,9 @@ Bugfixes: * [#435][]: Support a variety of case conventions when unmarshaling levels. * [#444][]: Fix a panic in the observer. +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 + ## v1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -355,6 +460,10 @@ Enhancements: * [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a variety of operations a bit simpler. +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 + ## v1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the @@ -366,6 +475,9 @@ Enhancements: particularly useful when testing the `SugaredLogger`. * [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 + ## v1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -375,6 +487,8 @@ Enhancements: * [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements `grpclog.Logger`. +[#402]: https://github.com/uber-go/zap/pull/402 + ## v1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. 
@@ -392,6 +506,10 @@ Enhancements: Thanks to @moitias for contributing to this release. +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 + ## v1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no @@ -437,6 +555,20 @@ Enhancements: Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their contributions to this release. +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 + ## v1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no @@ -458,6 +590,11 @@ Enhancements: Thanks to @ansel1 and @suyash for their contributions to this release. +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 + ## v1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two @@ -495,6 +632,15 @@ Enhancements: Thanks to @skipor and @chapsuk for their contributions to this release. +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 + ## v1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple @@ -523,95 +669,3 @@ backward compatibility concerns and all functionality is new. Early zap adopters should pin to the 0.1.x minor version until they're ready to upgrade to the upcoming stable release. 
- -[#316]: https://github.com/uber-go/zap/pull/316 -[#309]: https://github.com/uber-go/zap/pull/309 -[#317]: https://github.com/uber-go/zap/pull/317 -[#321]: https://github.com/uber-go/zap/pull/321 -[#325]: https://github.com/uber-go/zap/pull/325 -[#333]: https://github.com/uber-go/zap/pull/333 -[#326]: https://github.com/uber-go/zap/pull/326 -[#300]: https://github.com/uber-go/zap/pull/300 -[#339]: https://github.com/uber-go/zap/pull/339 -[#307]: https://github.com/uber-go/zap/pull/307 -[#353]: https://github.com/uber-go/zap/pull/353 -[#311]: https://github.com/uber-go/zap/pull/311 -[#366]: https://github.com/uber-go/zap/pull/366 -[#364]: https://github.com/uber-go/zap/pull/364 -[#371]: https://github.com/uber-go/zap/pull/371 -[#362]: https://github.com/uber-go/zap/pull/362 -[#369]: https://github.com/uber-go/zap/pull/369 -[#347]: https://github.com/uber-go/zap/pull/347 -[#373]: https://github.com/uber-go/zap/pull/373 -[#348]: https://github.com/uber-go/zap/pull/348 -[#327]: https://github.com/uber-go/zap/pull/327 -[#376]: https://github.com/uber-go/zap/pull/376 -[#346]: https://github.com/uber-go/zap/pull/346 -[#365]: https://github.com/uber-go/zap/pull/365 -[#372]: https://github.com/uber-go/zap/pull/372 -[#385]: https://github.com/uber-go/zap/pull/385 -[#396]: https://github.com/uber-go/zap/pull/396 -[#386]: https://github.com/uber-go/zap/pull/386 -[#402]: https://github.com/uber-go/zap/pull/402 -[#415]: https://github.com/uber-go/zap/pull/415 -[#416]: https://github.com/uber-go/zap/pull/416 -[#424]: https://github.com/uber-go/zap/pull/424 -[#425]: https://github.com/uber-go/zap/pull/425 -[#431]: https://github.com/uber-go/zap/pull/431 -[#435]: https://github.com/uber-go/zap/pull/435 -[#444]: https://github.com/uber-go/zap/pull/444 -[#477]: https://github.com/uber-go/zap/pull/477 -[#465]: https://github.com/uber-go/zap/pull/465 -[#460]: https://github.com/uber-go/zap/pull/460 -[#470]: https://github.com/uber-go/zap/pull/470 -[#487]: https://github.com/uber-go/zap/pull/487 -[#490]: https://github.com/uber-go/zap/pull/490 -[#491]: https://github.com/uber-go/zap/pull/491 -[#504]: https://github.com/uber-go/zap/pull/504 -[#508]: https://github.com/uber-go/zap/pull/508 -[#518]: https://github.com/uber-go/zap/pull/518 -[#577]: https://github.com/uber-go/zap/pull/577 -[#574]: https://github.com/uber-go/zap/pull/574 -[#602]: https://github.com/uber-go/zap/pull/602 -[#572]: https://github.com/uber-go/zap/pull/572 -[#606]: https://github.com/uber-go/zap/pull/606 -[#614]: https://github.com/uber-go/zap/pull/614 -[#657]: https://github.com/uber-go/zap/pull/657 -[#706]: https://github.com/uber-go/zap/pull/706 -[#610]: https://github.com/uber-go/zap/pull/610 -[#675]: https://github.com/uber-go/zap/pull/675 -[#704]: https://github.com/uber-go/zap/pull/704 -[#725]: https://github.com/uber-go/zap/pull/725 -[#736]: https://github.com/uber-go/zap/pull/736 -[#751]: https://github.com/uber-go/zap/pull/751 -[#758]: https://github.com/uber-go/zap/pull/758 -[#771]: https://github.com/uber-go/zap/pull/771 -[#773]: https://github.com/uber-go/zap/pull/773 -[#775]: https://github.com/uber-go/zap/pull/775 -[#786]: https://github.com/uber-go/zap/pull/786 -[#791]: https://github.com/uber-go/zap/pull/791 -[#795]: https://github.com/uber-go/zap/pull/795 -[#799]: https://github.com/uber-go/zap/pull/799 -[#804]: https://github.com/uber-go/zap/pull/804 -[#812]: https://github.com/uber-go/zap/pull/812 -[#806]: https://github.com/uber-go/zap/pull/806 -[#813]: https://github.com/uber-go/zap/pull/813 -[#629]: 
https://github.com/uber-go/zap/pull/629 -[#697]: https://github.com/uber-go/zap/pull/697 -[#828]: https://github.com/uber-go/zap/pull/828 -[#835]: https://github.com/uber-go/zap/pull/835 -[#843]: https://github.com/uber-go/zap/pull/843 -[#844]: https://github.com/uber-go/zap/pull/844 -[#852]: https://github.com/uber-go/zap/pull/852 -[#854]: https://github.com/uber-go/zap/pull/854 -[#861]: https://github.com/uber-go/zap/pull/861 -[#862]: https://github.com/uber-go/zap/pull/862 -[#865]: https://github.com/uber-go/zap/pull/865 -[#867]: https://github.com/uber-go/zap/pull/867 -[#881]: https://github.com/uber-go/zap/pull/881 -[#903]: https://github.com/uber-go/zap/pull/903 -[#912]: https://github.com/uber-go/zap/pull/912 -[#913]: https://github.com/uber-go/zap/pull/913 -[#928]: https://github.com/uber-go/zap/pull/928 -[#931]: https://github.com/uber-go/zap/pull/931 -[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile index 9b1bc3b0e..eb1cee53b 100644 --- a/vendor/go.uber.org/zap/Makefile +++ b/vendor/go.uber.org/zap/Makefile @@ -1,50 +1,51 @@ -export GOBIN ?= $(shell pwd)/bin +# Directory containing the Makefile. +PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) -GOLINT = $(GOBIN)/golint -STATICCHECK = $(GOBIN)/staticcheck +export GOBIN ?= $(PROJECT_ROOT)/bin +export PATH := $(GOBIN):$(PATH) + +GOVULNCHECK = $(GOBIN)/govulncheck BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem # Directories containing independent Go modules. -# -# We track coverage only for the main module. -MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test +MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test -# Many Go tools take file globs or directories as arguments instead of packages. -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) +# Directories that we want to track coverage for. +COVER_DIRS = . ./exp .PHONY: all all: lint test .PHONY: lint -lint: $(GOLINT) $(STATICCHECK) - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking lint..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking staticcheck..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e Makefile | tee -a lint.log - @echo "Checking for license headers..." - @./checklicense.sh | tee -a lint.log - @[ ! -s lint.log ] - @echo "Checking 'go mod tidy'..." - @make tidy - @if ! 
git diff --quiet; then \ - echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ - git --no-pager diff; \ - fi - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck +lint: golangci-lint tidy-lint license-lint + +.PHONY: golangci-lint +golangci-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] golangci-lint: $(mod)" && \ + golangci-lint run --path-prefix $(mod)) &&) true + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS), \ + (cd $(dir) && go mod tidy) &&) true + +.PHONY: tidy-lint +tidy-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] tidy: $(mod)" && \ + go mod tidy && \ + git diff --exit-code -- go.mod go.sum) &&) true + + +.PHONY: license-lint +license-lint: + ./checklicense.sh + +$(GOVULNCHECK): + cd tools && go install golang.org/x/vuln/cmd/govulncheck .PHONY: test test: @@ -52,8 +53,10 @@ test: .PHONY: cover cover: - go test -race -coverprofile=cover.out -coverpkg=./... ./... - go tool cover -html=cover.out -o cover.html + @$(foreach dir,$(COVER_DIRS), ( \ + cd $(dir) && \ + go test -race -coverprofile=cover.out -coverpkg=./... ./... \ + && go tool cover -html=cover.out -o cover.html) &&) true .PHONY: bench BENCH ?= . @@ -68,6 +71,6 @@ updatereadme: rm -f README.md cat .readme.tmpl | go run internal/readme/readme.go > README.md -.PHONY: tidy -tidy: - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true +.PHONY: vulncheck +vulncheck: $(GOVULNCHECK) + $(GOVULNCHECK) ./... diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index a553a428c..9de08927b 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -54,7 +54,7 @@ and make many small allocations. Put differently, using `encoding/json` and Zap takes a different approach. It includes a reflection-free, zero-allocation JSON encoder, and the base `Logger` strives to avoid serialization overhead and allocations wherever possible. By building the high-level `SugaredLogger` -on that foundation, zap lets users _choose_ when they need to count every +on that foundation, zap lets users *choose* when they need to count every allocation and when they'd prefer a more familiar, loosely typed API. 
As measured by its own [benchmarking suite][], not only is zap more performant @@ -64,40 +64,43 @@ id="anchor-versions">[1](#footnote-versions) Log a message and 10 fields: -| Package | Time | Time % to zap | Objects Allocated | -| :------------------ | :---------: | :-----------: | :---------------: | -| :zap: zap | 2900 ns/op | +0% | 5 allocs/op | -| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op | -| zerolog | 10639 ns/op | +267% | 32 allocs/op | -| go-kit | 14434 ns/op | +398% | 59 allocs/op | -| logrus | 17104 ns/op | +490% | 81 allocs/op | -| apex/log | 32424 ns/op | +1018% | 66 allocs/op | -| log15 | 33579 ns/op | +1058% | 76 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 1744 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op +| zerolog | 918 ns/op | -47% | 1 allocs/op +| go-kit | 5590 ns/op | +221% | 57 allocs/op +| slog | 5640 ns/op | +223% | 40 allocs/op +| apex/log | 21184 ns/op | +1115% | 63 allocs/op +| logrus | 24338 ns/op | +1296% | 79 allocs/op +| log15 | 26054 ns/op | +1394% | 74 allocs/op Log a message with a logger that already has 10 fields of context: -| Package | Time | Time % to zap | Objects Allocated | -| :------------------ | :---------: | :-----------: | :---------------: | -| :zap: zap | 373 ns/op | +0% | 0 allocs/op | -| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op | -| zerolog | 288 ns/op | -23% | 0 allocs/op | -| go-kit | 11785 ns/op | +3060% | 58 allocs/op | -| logrus | 19629 ns/op | +5162% | 70 allocs/op | -| log15 | 21866 ns/op | +5762% | 72 allocs/op | -| apex/log | 30890 ns/op | +8182% | 55 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 193 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op +| zerolog | 81 ns/op | -58% | 0 allocs/op +| slog | 322 ns/op | +67% | 0 allocs/op +| go-kit | 5377 ns/op | +2686% | 56 allocs/op +| apex/log | 19518 ns/op | +10013% | 53 allocs/op +| log15 | 19812 ns/op | +10165% | 70 allocs/op +| logrus | 21997 ns/op | +11297% | 68 allocs/op Log a static string, without any context or `printf`-style templating: -| Package | Time | Time % to zap | Objects Allocated | -| :------------------ | :--------: | :-----------: | :---------------: | -| :zap: zap | 381 ns/op | +0% | 0 allocs/op | -| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op | -| zerolog | 369 ns/op | -3% | 0 allocs/op | -| standard library | 385 ns/op | +1% | 2 allocs/op | -| go-kit | 606 ns/op | +59% | 11 allocs/op | -| logrus | 1730 ns/op | +354% | 25 allocs/op | -| apex/log | 1998 ns/op | +424% | 7 allocs/op | -| log15 | 4546 ns/op | +1093% | 22 allocs/op | +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 165 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op +| zerolog | 95 ns/op | -42% | 0 allocs/op +| slog | 296 ns/op | +79% | 0 allocs/op +| go-kit | 415 ns/op | +152% | 9 allocs/op +| standard library | 422 ns/op | +156% | 2 allocs/op +| apex/log | 1601 ns/op | +870% | 5 allocs/op +| logrus | 3017 ns/op | +1728% | 23 allocs/op +| log15 | 3469 ns/op | +2002% | 20 allocs/op ## Development Status: Stable @@ -131,3 +134,4 @@ pinned in the [benchmarks/go.mod][] file. 
[↩](#anchor-versions) [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks [benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go index 5be3704a3..abfccb566 100644 --- a/vendor/go.uber.org/zap/array.go +++ b/vendor/go.uber.org/zap/array.go @@ -21,6 +21,7 @@ package zap import ( + "fmt" "time" "go.uber.org/zap/zapcore" @@ -94,11 +95,137 @@ func Int8s(key string, nums []int8) Field { return Array(key, int8s(nums)) } +// Objects constructs a field with the given key, holding a list of the +// provided objects that can be marshaled by Zap. +// +// Note that these objects must implement zapcore.ObjectMarshaler directly. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the Request type, not its pointer (*Request). +// If it's on the pointer, use ObjectValues. +// +// Given an object that implements MarshalLogObject on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Author struct{ ... } +// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var authors []Author = ... +// logger.Info("loading article", zap.Objects("authors", authors)) +// +// Similarly, given a type that implements MarshalLogObject on its pointer +// receiver, you can log a slice of pointers to that object with Objects like +// so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +// +// If instead, you have a slice of values of such an object, use the +// ObjectValues constructor. +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { + return Array(key, objects[T](values)) +} + +type objects[T zapcore.ObjectMarshaler] []T + +func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + if err := arr.AppendObject(o); err != nil { + return err + } + } + return nil +} + +// ObjectMarshalerPtr is a constraint that specifies that the given type +// implements zapcore.ObjectMarshaler on a pointer receiver. +type ObjectMarshalerPtr[T any] interface { + *T + zapcore.ObjectMarshaler +} + +// ObjectValues constructs a field with the given key, holding a list of the +// provided objects, where pointers to these objects can be marshaled by Zap. +// +// Note that pointers to these objects must implement zapcore.ObjectMarshaler. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the *Request type, not the value (Request). +// If it's on the value, use Objects. +// +// Given an object that implements MarshalLogObject on the pointer receiver, +// you can log a slice of those objects with ObjectValues like so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +// +// If instead, you have a slice of pointers of such an object, use the Objects +// field constructor. +// +// var requests []*Request = ... 
+// logger.Info("sending requests", zap.Objects("requests", requests)) +func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { + return Array(key, objectValues[T, P](values)) +} + +type objectValues[T any, P ObjectMarshalerPtr[T]] []T + +func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range os { + // It is necessary for us to explicitly reference the "P" type. + // We cannot simply pass "&os[i]" to AppendObject because its type + // is "*T", which the type system does not consider as + // implementing ObjectMarshaler. + // Only the type "P" satisfies ObjectMarshaler, which we have + // to convert "*T" to explicitly. + var p P = &os[i] + if err := arr.AppendObject(p); err != nil { + return err + } + } + return nil +} + // Strings constructs a field that carries a slice of strings. func Strings(key string, ss []string) Field { return Array(key, stringArray(ss)) } +// Stringers constructs a field with the given key, holding a list of the +// output provided by the value's String method +// +// Given an object that implements String on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Request struct{ ... } +// func (a Request) String() string +// +// var requests []Request = ... +// logger.Info("sending requests", zap.Stringers("requests", requests)) +// +// Note that these objects must implement fmt.Stringer directly. +// That is, if you're trying to marshal a []Request, the String method +// must be declared on the Request type, not its pointer (*Request). +func Stringers[T fmt.Stringer](key string, values []T) Field { + return Array(key, stringers[T](values)) +} + +type stringers[T fmt.Stringer] []T + +func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + arr.AppendString(o.String()) + } + return nil +} + // Times constructs a field that carries a slice of time.Times. func Times(key string, ts []time.Time) Field { return Array(key, times(ts)) diff --git a/vendor/go.uber.org/zap/array_go118.go b/vendor/go.uber.org/zap/array_go118.go deleted file mode 100644 index d0d2c49d6..000000000 --- a/vendor/go.uber.org/zap/array_go118.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:build go1.18 -// +build go1.18 - -package zap - -import ( - "fmt" - - "go.uber.org/zap/zapcore" -) - -// Objects constructs a field with the given key, holding a list of the -// provided objects that can be marshaled by Zap. -// -// Note that these objects must implement zapcore.ObjectMarshaler directly. -// That is, if you're trying to marshal a []Request, the MarshalLogObject -// method must be declared on the Request type, not its pointer (*Request). -// If it's on the pointer, use ObjectValues. -// -// Given an object that implements MarshalLogObject on the value receiver, you -// can log a slice of those objects with Objects like so: -// -// type Author struct{ ... } -// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error -// -// var authors []Author = ... -// logger.Info("loading article", zap.Objects("authors", authors)) -// -// Similarly, given a type that implements MarshalLogObject on its pointer -// receiver, you can log a slice of pointers to that object with Objects like -// so: -// -// type Request struct{ ... } -// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error -// -// var requests []*Request = ... -// logger.Info("sending requests", zap.Objects("requests", requests)) -// -// If instead, you have a slice of values of such an object, use the -// ObjectValues constructor. -// -// var requests []Request = ... -// logger.Info("sending requests", zap.ObjectValues("requests", requests)) -func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { - return Array(key, objects[T](values)) -} - -type objects[T zapcore.ObjectMarshaler] []T - -func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for _, o := range os { - if err := arr.AppendObject(o); err != nil { - return err - } - } - return nil -} - -// ObjectMarshalerPtr is a constraint that specifies that the given type -// implements zapcore.ObjectMarshaler on a pointer receiver. -type ObjectMarshalerPtr[T any] interface { - *T - zapcore.ObjectMarshaler -} - -// ObjectValues constructs a field with the given key, holding a list of the -// provided objects, where pointers to these objects can be marshaled by Zap. -// -// Note that pointers to these objects must implement zapcore.ObjectMarshaler. -// That is, if you're trying to marshal a []Request, the MarshalLogObject -// method must be declared on the *Request type, not the value (Request). -// If it's on the value, use Objects. -// -// Given an object that implements MarshalLogObject on the pointer receiver, -// you can log a slice of those objects with ObjectValues like so: -// -// type Request struct{ ... } -// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error -// -// var requests []Request = ... -// logger.Info("sending requests", zap.ObjectValues("requests", requests)) -// -// If instead, you have a slice of pointers of such an object, use the Objects -// field constructor. -// -// var requests []*Request = ... -// logger.Info("sending requests", zap.Objects("requests", requests)) -func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { - return Array(key, objectValues[T, P](values)) -} - -type objectValues[T any, P ObjectMarshalerPtr[T]] []T - -func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range os { - // It is necessary for us to explicitly reference the "P" type. 
- // We cannot simply pass "&os[i]" to AppendObject because its type - // is "*T", which the type system does not consider as - // implementing ObjectMarshaler. - // Only the type "P" satisfies ObjectMarshaler, which we have - // to convert "*T" to explicitly. - var p P = &os[i] - if err := arr.AppendObject(p); err != nil { - return err - } - } - return nil -} - -// Stringers constructs a field with the given key, holding a list of the -// output provided by the value's String method -// -// Given an object that implements String on the value receiver, you -// can log a slice of those objects with Objects like so: -// -// type Request struct{ ... } -// func (a Request) String() string -// -// var requests []Request = ... -// logger.Info("sending requests", zap.Stringers("requests", requests)) -// -// Note that these objects must implement fmt.Stringer directly. -// That is, if you're trying to marshal a []Request, the String method -// must be declared on the Request type, not its pointer (*Request). -func Stringers[T fmt.Stringer](key string, values []T) Field { - return Array(key, stringers[T](values)) -} - -type stringers[T fmt.Stringer] []T - -func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for _, o := range os { - arr.AppendString(o.String()) - } - return nil -} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 9e929cd98..27fb5cd5d 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -42,6 +42,11 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } +// AppendBytes writes a single byte to the Buffer. +func (b *Buffer) AppendBytes(v []byte) { + b.bs = append(b.bs, v...) +} + // AppendString writes a string to the Buffer. func (b *Buffer) AppendString(s string) { b.bs = append(b.bs, s...) diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go index 8fb3e202c..846323360 100644 --- a/vendor/go.uber.org/zap/buffer/pool.go +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -20,25 +20,29 @@ package buffer -import "sync" +import ( + "go.uber.org/zap/internal/pool" +) // A Pool is a type-safe wrapper around a sync.Pool. type Pool struct { - p *sync.Pool + p *pool.Pool[*Buffer] } // NewPool constructs a new Pool. func NewPool() Pool { - return Pool{p: &sync.Pool{ - New: func() interface{} { - return &Buffer{bs: make([]byte, 0, _size)} - }, - }} + return Pool{ + p: pool.New(func() *Buffer { + return &Buffer{ + bs: make([]byte, 0, _size), + } + }), + } } // Get retrieves a Buffer from the pool, creating one if necessary. func (p Pool) Get() *Buffer { - buf := p.p.Get().(*Buffer) + buf := p.p.Get() buf.Reset() buf.pool = p return buf diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go index ee6096766..e76e4e64f 100644 --- a/vendor/go.uber.org/zap/config.go +++ b/vendor/go.uber.org/zap/config.go @@ -95,6 +95,32 @@ type Config struct { // NewProductionEncoderConfig returns an opinionated EncoderConfig for // production environments. +// +// Messages encoded with this configuration will be JSON-formatted +// and will have the following keys by default: +// +// - "level": The logging level (e.g. "info", "error"). +// - "ts": The current time in number of seconds since the Unix epoch. +// - "msg": The message passed to the log statement. +// - "caller": If available, a short path to the file and line number +// where the log statement was issued. 
+// The logger configuration determines whether this field is captured. +// - "stacktrace": If available, a stack trace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted as floating-point number of seconds since the Unix +// epoch. +// - Duration is formatted as floating-point number of seconds. +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewProductionEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder func NewProductionEncoderConfig() zapcore.EncoderConfig { return zapcore.EncoderConfig{ TimeKey: "ts", @@ -112,11 +138,22 @@ func NewProductionEncoderConfig() zapcore.EncoderConfig { } } -// NewProductionConfig is a reasonable production logging configuration. -// Logging is enabled at InfoLevel and above. +// NewProductionConfig builds a reasonable default production logging +// configuration. +// Logging is enabled at InfoLevel and above, and uses a JSON encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of ErrorLevel and above. +// DPanicLevel logs will not panic, but will write a stacktrace. +// +// Sampling is enabled at 100:100 by default, +// meaning that after the first 100 log entries +// with the same level and message in the same second, +// it will log every 100th entry +// with the same level and message in the same second. +// You may disable this behavior by setting Sampling to nil. // -// It uses a JSON encoder, writes to standard error, and enables sampling. -// Stacktraces are automatically included on logs of ErrorLevel and above. +// See [NewProductionEncoderConfig] for information +// on the default encoder configuration. func NewProductionConfig() Config { return Config{ Level: NewAtomicLevelAt(InfoLevel), @@ -134,6 +171,32 @@ func NewProductionConfig() Config { // NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for // development environments. +// +// Messages encoded with this configuration will use Zap's console encoder +// intended to print human-readable output. +// It will print log messages with the following information: +// +// - The log level (e.g. "INFO", "ERROR"). +// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - The message passed to the log statement. +// - If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - If available, a stacktrace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - Duration is formatted as a string (e.g. "1.234s"). +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewDevelopmentEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { return zapcore.EncoderConfig{ // Keys can be anything except the empty string. 
@@ -152,12 +215,15 @@ func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { } } -// NewDevelopmentConfig is a reasonable development logging configuration. -// Logging is enabled at DebugLevel and above. +// NewDevelopmentConfig builds a reasonable default development logging +// configuration. +// Logging is enabled at DebugLevel and above, and uses a console encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of WarnLevel and above. +// DPanicLevel logs will panic. // -// It enables development mode (which makes DPanicLevel logs panic), uses a -// console encoder, writes to standard error, and disables sampling. -// Stacktraces are automatically included on logs of WarnLevel and above. +// See [NewDevelopmentEncoderConfig] for information +// on the default encoder configuration. func NewDevelopmentConfig() Config { return Config{ Level: NewAtomicLevelAt(DebugLevel), diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go index 65982a51e..45f7b838d 100644 --- a/vendor/go.uber.org/zap/error.go +++ b/vendor/go.uber.org/zap/error.go @@ -21,14 +21,13 @@ package zap import ( - "sync" - + "go.uber.org/zap/internal/pool" "go.uber.org/zap/zapcore" ) -var _errArrayElemPool = sync.Pool{New: func() interface{} { +var _errArrayElemPool = pool.New(func() *errArrayElem { return &errArrayElem{} -}} +}) // Error is shorthand for the common idiom NamedError("error", err). func Error(err error) Field { @@ -60,11 +59,14 @@ func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { // potentially an "errorVerbose" attribute, we need to wrap it in a // type that implements LogObjectMarshaler. To prevent this from // allocating, pool the wrapper type. - elem := _errArrayElemPool.Get().(*errArrayElem) + elem := _errArrayElemPool.Get() elem.error = errs[i] - arr.AppendObject(elem) + err := arr.AppendObject(elem) elem.error = nil _errArrayElemPool.Put(elem) + if err != nil { + return err + } } return nil } diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index bbb745db5..c8dd3358a 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -25,6 +25,7 @@ import ( "math" "time" + "go.uber.org/zap/internal/stacktrace" "go.uber.org/zap/zapcore" ) @@ -374,7 +375,7 @@ func StackSkip(key string, skip int) Field { // from expanding the zapcore.Field union struct to include a byte slice. Since // taking a stacktrace is already so expensive (~10us), the extra allocation // is okay. - return String(key, takeStacktrace(skip+1)) // skip StackSkip + return String(key, stacktrace.Take(skip+1)) // skip StackSkip } // Duration constructs a field with the given key and value. The encoder @@ -410,6 +411,63 @@ func Inline(val zapcore.ObjectMarshaler) Field { } } +// Dict constructs a field containing the provided key-value pairs. +// It acts similar to [Object], but with the fields specified as arguments. +func Dict(key string, val ...Field) Field { + return dictField(key, val) +} + +// We need a function with the signature (string, T) for zap.Any. +func dictField(key string, val []Field) Field { + return Object(key, dictObject(val)) +} + +type dictObject []Field + +func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { + for _, f := range d { + f.AddTo(enc) + } + return nil +} + +// We discovered an issue where zap.Any can cause a performance degradation +// when used in new goroutines. 
+// +// This happens because the compiler assigns 4.8kb (one zap.Field per arm of +// switch statement) of stack space for zap.Any when it takes the form: +// +// switch v := v.(type) { +// case string: +// return String(key, v) +// case int: +// return Int(key, v) +// // ... +// default: +// return Reflect(key, v) +// } +// +// To avoid this, we use the type switch to assign a value to a single local variable +// and then call a function on it. +// The local variable is just a function reference so it doesn't allocate +// when converted to an interface{}. +// +// A fair bit of experimentation went into this. +// See also: +// +// - https://github.com/uber-go/zap/pull/1301 +// - https://github.com/uber-go/zap/pull/1303 +// - https://github.com/uber-go/zap/pull/1304 +// - https://github.com/uber-go/zap/pull/1305 +// - https://github.com/uber-go/zap/pull/1308 +type anyFieldC[T any] func(string, T) Field + +func (f anyFieldC[T]) Any(key string, val any) Field { + v, _ := val.(T) + // val is guaranteed to be a T, except when it's nil. + return f(key, v) +} + // Any takes a key and an arbitrary value and chooses the best way to represent // them as a field, falling back to a reflection-based approach only if // necessary. @@ -418,132 +476,138 @@ func Inline(val zapcore.ObjectMarshaler) Field { // them. To minimize surprises, []byte values are treated as binary blobs, byte // values are treated as uint8, and runes are always treated as integers. func Any(key string, value interface{}) Field { - switch val := value.(type) { + var c interface{ Any(string, any) Field } + + switch value.(type) { case zapcore.ObjectMarshaler: - return Object(key, val) + c = anyFieldC[zapcore.ObjectMarshaler](Object) case zapcore.ArrayMarshaler: - return Array(key, val) + c = anyFieldC[zapcore.ArrayMarshaler](Array) + case []Field: + c = anyFieldC[[]Field](dictField) case bool: - return Bool(key, val) + c = anyFieldC[bool](Bool) case *bool: - return Boolp(key, val) + c = anyFieldC[*bool](Boolp) case []bool: - return Bools(key, val) + c = anyFieldC[[]bool](Bools) case complex128: - return Complex128(key, val) + c = anyFieldC[complex128](Complex128) case *complex128: - return Complex128p(key, val) + c = anyFieldC[*complex128](Complex128p) case []complex128: - return Complex128s(key, val) + c = anyFieldC[[]complex128](Complex128s) case complex64: - return Complex64(key, val) + c = anyFieldC[complex64](Complex64) case *complex64: - return Complex64p(key, val) + c = anyFieldC[*complex64](Complex64p) case []complex64: - return Complex64s(key, val) + c = anyFieldC[[]complex64](Complex64s) case float64: - return Float64(key, val) + c = anyFieldC[float64](Float64) case *float64: - return Float64p(key, val) + c = anyFieldC[*float64](Float64p) case []float64: - return Float64s(key, val) + c = anyFieldC[[]float64](Float64s) case float32: - return Float32(key, val) + c = anyFieldC[float32](Float32) case *float32: - return Float32p(key, val) + c = anyFieldC[*float32](Float32p) case []float32: - return Float32s(key, val) + c = anyFieldC[[]float32](Float32s) case int: - return Int(key, val) + c = anyFieldC[int](Int) case *int: - return Intp(key, val) + c = anyFieldC[*int](Intp) case []int: - return Ints(key, val) + c = anyFieldC[[]int](Ints) case int64: - return Int64(key, val) + c = anyFieldC[int64](Int64) case *int64: - return Int64p(key, val) + c = anyFieldC[*int64](Int64p) case []int64: - return Int64s(key, val) + c = anyFieldC[[]int64](Int64s) case int32: - return Int32(key, val) + c = anyFieldC[int32](Int32) case *int32: - 
return Int32p(key, val) + c = anyFieldC[*int32](Int32p) case []int32: - return Int32s(key, val) + c = anyFieldC[[]int32](Int32s) case int16: - return Int16(key, val) + c = anyFieldC[int16](Int16) case *int16: - return Int16p(key, val) + c = anyFieldC[*int16](Int16p) case []int16: - return Int16s(key, val) + c = anyFieldC[[]int16](Int16s) case int8: - return Int8(key, val) + c = anyFieldC[int8](Int8) case *int8: - return Int8p(key, val) + c = anyFieldC[*int8](Int8p) case []int8: - return Int8s(key, val) + c = anyFieldC[[]int8](Int8s) case string: - return String(key, val) + c = anyFieldC[string](String) case *string: - return Stringp(key, val) + c = anyFieldC[*string](Stringp) case []string: - return Strings(key, val) + c = anyFieldC[[]string](Strings) case uint: - return Uint(key, val) + c = anyFieldC[uint](Uint) case *uint: - return Uintp(key, val) + c = anyFieldC[*uint](Uintp) case []uint: - return Uints(key, val) + c = anyFieldC[[]uint](Uints) case uint64: - return Uint64(key, val) + c = anyFieldC[uint64](Uint64) case *uint64: - return Uint64p(key, val) + c = anyFieldC[*uint64](Uint64p) case []uint64: - return Uint64s(key, val) + c = anyFieldC[[]uint64](Uint64s) case uint32: - return Uint32(key, val) + c = anyFieldC[uint32](Uint32) case *uint32: - return Uint32p(key, val) + c = anyFieldC[*uint32](Uint32p) case []uint32: - return Uint32s(key, val) + c = anyFieldC[[]uint32](Uint32s) case uint16: - return Uint16(key, val) + c = anyFieldC[uint16](Uint16) case *uint16: - return Uint16p(key, val) + c = anyFieldC[*uint16](Uint16p) case []uint16: - return Uint16s(key, val) + c = anyFieldC[[]uint16](Uint16s) case uint8: - return Uint8(key, val) + c = anyFieldC[uint8](Uint8) case *uint8: - return Uint8p(key, val) + c = anyFieldC[*uint8](Uint8p) case []byte: - return Binary(key, val) + c = anyFieldC[[]byte](Binary) case uintptr: - return Uintptr(key, val) + c = anyFieldC[uintptr](Uintptr) case *uintptr: - return Uintptrp(key, val) + c = anyFieldC[*uintptr](Uintptrp) case []uintptr: - return Uintptrs(key, val) + c = anyFieldC[[]uintptr](Uintptrs) case time.Time: - return Time(key, val) + c = anyFieldC[time.Time](Time) case *time.Time: - return Timep(key, val) + c = anyFieldC[*time.Time](Timep) case []time.Time: - return Times(key, val) + c = anyFieldC[[]time.Time](Times) case time.Duration: - return Duration(key, val) + c = anyFieldC[time.Duration](Duration) case *time.Duration: - return Durationp(key, val) + c = anyFieldC[*time.Duration](Durationp) case []time.Duration: - return Durations(key, val) + c = anyFieldC[[]time.Duration](Durations) case error: - return NamedError(key, val) + c = anyFieldC[error](NamedError) case []error: - return Errors(key, val) + c = anyFieldC[[]error](Errors) case fmt.Stringer: - return Stringer(key, val) + c = anyFieldC[fmt.Stringer](Stringer) default: - return Reflect(key, val) + c = anyFieldC[any](Reflect) } + + return c.Any(key, value) } diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go index 632b6831a..2be8f6515 100644 --- a/vendor/go.uber.org/zap/http_handler.go +++ b/vendor/go.uber.org/zap/http_handler.go @@ -69,6 +69,13 @@ import ( // // curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if err := lvl.serveHTTP(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "internal error: %v", err) + } +} + +func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r 
*http.Request) error { type errorResponse struct { Error string `json:"error"` } @@ -80,19 +87,20 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.Method { case http.MethodGet: - enc.Encode(payload{Level: lvl.Level()}) + return enc.Encode(payload{Level: lvl.Level()}) + case http.MethodPut: requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) if err != nil { w.WriteHeader(http.StatusBadRequest) - enc.Encode(errorResponse{Error: err.Error()}) - return + return enc.Encode(errorResponse{Error: err.Error()}) } lvl.SetLevel(requestedLvl) - enc.Encode(payload{Level: lvl.Level()}) + return enc.Encode(payload{Level: lvl.Level()}) + default: w.WriteHeader(http.StatusMethodNotAllowed) - enc.Encode(errorResponse{ + return enc.Encode(errorResponse{ Error: "Only GET and PUT are supported.", }) } @@ -129,5 +137,4 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) { return 0, errors.New("must specify logging level") } return *pld.Level, nil - } diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go index 5f3e3f1b9..40bfed81e 100644 --- a/vendor/go.uber.org/zap/internal/level_enabler.go +++ b/vendor/go.uber.org/zap/internal/level_enabler.go @@ -18,6 +18,8 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. +// Package internal and its subpackages hold types and functionality +// that are not part of Zap's public API. package internal import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/zap/internal/pool/pool.go similarity index 56% rename from vendor/go.uber.org/atomic/bool_ext.go rename to vendor/go.uber.org/zap/internal/pool/pool.go index a2e60e987..60e9d2c43 100644 --- a/vendor/go.uber.org/atomic/bool_ext.go +++ b/vendor/go.uber.org/zap/internal/pool/pool.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,36 +18,41 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package atomic +// Package pool provides internal pool utilities. +package pool import ( - "strconv" + "sync" ) -//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go - -func truthy(n uint32) bool { - return n == 1 +// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed +// object pooling. +// +// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will +// not be detected, so all internal pool use must take care to only store +// pointer types. +type Pool[T any] struct { + pool sync.Pool } -func boolToInt(b bool) uint32 { - if b { - return 1 +// New returns a new [Pool] for T, and will use fn to construct new Ts when +// the pool is empty. +func New[T any](fn func() T) *Pool[T] { + return &Pool[T]{ + pool: sync.Pool{ + New: func() any { + return fn() + }, + }, } - return 0 } -// Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() (old bool) { - for { - old := b.Load() - if b.CAS(old, !old) { - return old - } - } +// Get gets a T from the pool, or creates a new one if the pool is empty. +func (p *Pool[T]) Get() T { + return p.pool.Get().(T) } -// String encodes the wrapped value as a string. 
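The generic Pool introduced above is internal to zap, but the pattern it captures (a typed wrapper over sync.Pool that only ever stores pointer values, per the SA6002 note) is easy to reproduce. A minimal standalone sketch, using only the standard library; the bufferPool and render names and the 1 KiB capacity are illustrative, not part of this patch:

package poolsketch

import (
	"bytes"
	"sync"
)

// Pool wraps sync.Pool so callers get a concrete T back instead of an
// interface{} that has to be type-asserted at every call site.
type Pool[T any] struct{ p sync.Pool }

// New builds a Pool that uses fn to construct values when the pool is empty.
func New[T any](fn func() T) *Pool[T] {
	return &Pool[T]{p: sync.Pool{New: func() any { return fn() }}}
}

func (p *Pool[T]) Get() T  { return p.p.Get().(T) }
func (p *Pool[T]) Put(x T) { p.p.Put(x) }

// bufferPool stores *bytes.Buffer values, a pointer type, so returning them
// to the pool does not allocate (the concern behind staticcheck SA6002).
var bufferPool = New(func() *bytes.Buffer {
	return bytes.NewBuffer(make([]byte, 0, 1024))
})

func render(msg string) string {
	buf := bufferPool.Get()
	defer func() {
		buf.Reset() // wipe contents before handing the buffer back
		bufferPool.Put(buf)
	}()
	buf.WriteString("msg=")
	buf.WriteString(msg)
	return buf.String()
}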
-func (b *Bool) String() string { - return strconv.FormatBool(b.Load()) +// Put returns x into the pool. +func (p *Pool[T]) Put(x T) { + p.pool.Put(x) } diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go similarity index 73% rename from vendor/go.uber.org/zap/stacktrace.go rename to vendor/go.uber.org/zap/internal/stacktrace/stack.go index 817a3bde8..82af7551f 100644 --- a/vendor/go.uber.org/zap/stacktrace.go +++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,25 +18,26 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package zap +// Package stacktrace provides support for gathering stack traces +// efficiently. +package stacktrace import ( "runtime" - "sync" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) -var _stacktracePool = sync.Pool{ - New: func() interface{} { - return &stacktrace{ - storage: make([]uintptr, 64), - } - }, -} +var _stackPool = pool.New(func() *Stack { + return &Stack{ + storage: make([]uintptr, 64), + } +}) -type stacktrace struct { +// Stack is a captured stack trace. +type Stack struct { pcs []uintptr // program counters; always a subslice of storage frames *runtime.Frames @@ -50,30 +51,30 @@ type stacktrace struct { storage []uintptr } -// stacktraceDepth specifies how deep of a stack trace should be captured. -type stacktraceDepth int +// Depth specifies how deep of a stack trace should be captured. +type Depth int const ( - // stacktraceFirst captures only the first frame. - stacktraceFirst stacktraceDepth = iota + // First captures only the first frame. + First Depth = iota - // stacktraceFull captures the entire call stack, allocating more + // Full captures the entire call stack, allocating more // storage for it if needed. - stacktraceFull + Full ) -// captureStacktrace captures a stack trace of the specified depth, skipping +// Capture captures a stack trace of the specified depth, skipping // the provided number of frames. skip=0 identifies the caller of -// captureStacktrace. +// Capture. // // The caller must call Free on the returned stacktrace after using it. -func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { - stack := _stacktracePool.Get().(*stacktrace) +func Capture(skip int, depth Depth) *Stack { + stack := _stackPool.Get() switch depth { - case stacktraceFirst: + case First: stack.pcs = stack.storage[:1] - case stacktraceFull: + case Full: stack.pcs = stack.storage } @@ -87,7 +88,7 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { // runtime.Callers truncates the recorded stacktrace if there is no // room in the provided slice. For the full stack trace, keep expanding // storage until there are fewer frames than there is room. - if depth == stacktraceFull { + if depth == Full { pcs := stack.pcs for numFrames == len(pcs) { pcs = make([]uintptr, len(pcs)*2) @@ -109,50 +110,54 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { // Free releases resources associated with this stacktrace // and returns it back to the pool. 
-func (st *stacktrace) Free() { +func (st *Stack) Free() { st.frames = nil st.pcs = nil - _stacktracePool.Put(st) + _stackPool.Put(st) } // Count reports the total number of frames in this stacktrace. // Count DOES NOT change as Next is called. -func (st *stacktrace) Count() int { +func (st *Stack) Count() int { return len(st.pcs) } // Next returns the next frame in the stack trace, // and a boolean indicating whether there are more after it. -func (st *stacktrace) Next() (_ runtime.Frame, more bool) { +func (st *Stack) Next() (_ runtime.Frame, more bool) { return st.frames.Next() } -func takeStacktrace(skip int) string { - stack := captureStacktrace(skip+1, stacktraceFull) +// Take returns a string representation of the current stacktrace. +// +// skip is the number of frames to skip before recording the stack trace. +// skip=0 identifies the caller of Take. +func Take(skip int) string { + stack := Capture(skip+1, Full) defer stack.Free() buffer := bufferpool.Get() defer buffer.Free() - stackfmt := newStackFormatter(buffer) + stackfmt := NewFormatter(buffer) stackfmt.FormatStack(stack) return buffer.String() } -// stackFormatter formats a stack trace into a readable string representation. -type stackFormatter struct { +// Formatter formats a stack trace into a readable string representation. +type Formatter struct { b *buffer.Buffer nonEmpty bool // whehther we've written at least one frame already } -// newStackFormatter builds a new stackFormatter. -func newStackFormatter(b *buffer.Buffer) stackFormatter { - return stackFormatter{b: b} +// NewFormatter builds a new Formatter. +func NewFormatter(b *buffer.Buffer) Formatter { + return Formatter{b: b} } // FormatStack formats all remaining frames in the provided stacktrace -- minus // the final runtime.main/runtime.goexit frame. -func (sf *stackFormatter) FormatStack(stack *stacktrace) { +func (sf *Formatter) FormatStack(stack *Stack) { // Note: On the last iteration, frames.Next() returns false, with a valid // frame, but we ignore this frame. The last frame is a runtime frame which // adds noise, since it's only either runtime.main or runtime.goexit. @@ -162,7 +167,7 @@ func (sf *stackFormatter) FormatStack(stack *stacktrace) { } // FormatFrame formats the given frame. -func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { +func (sf *Formatter) FormatFrame(frame runtime.Frame) { if sf.nonEmpty { sf.b.AppendByte('\n') } diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go index db951e19a..155b208bd 100644 --- a/vendor/go.uber.org/zap/level.go +++ b/vendor/go.uber.org/zap/level.go @@ -21,7 +21,8 @@ package zap import ( - "go.uber.org/atomic" + "sync/atomic" + "go.uber.org/zap/internal" "go.uber.org/zap/zapcore" ) @@ -76,9 +77,9 @@ var _ internal.LeveledEnabler = AtomicLevel{} // NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging // enabled. 
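Switching level.go from go.uber.org/atomic to the standard library's sync/atomic leaves the public surface of AtomicLevel unchanged: it is still created through NewAtomicLevel, still mutated with SetLevel, and still usable as the http.Handler wired up in http_handler.go above. A usage sketch; the route and listen address are arbitrary choices for the example:

package main

import (
	"net/http"

	"go.uber.org/zap"
)

func main() {
	// One dynamic level shared by the logger and the HTTP handler.
	lvl := zap.NewAtomicLevel() // defaults to InfoLevel

	cfg := zap.NewProductionConfig()
	cfg.Level = lvl
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer func() { _ = logger.Sync() }()

	// GET reports the current level; PUT {"level":"debug"} changes it at runtime.
	http.Handle("/log/level", lvl)
	go func() { _ = http.ListenAndServe("localhost:8080", nil) }()

	lvl.SetLevel(zap.DebugLevel) // programmatic changes take effect immediately
	logger.Debug("now visible")
}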
func NewAtomicLevel() AtomicLevel { - return AtomicLevel{ - l: atomic.NewInt32(int32(InfoLevel)), - } + lvl := AtomicLevel{l: new(atomic.Int32)} + lvl.l.Store(int32(InfoLevel)) + return lvl } // NewAtomicLevelAt is a convenience function that creates an AtomicLevel diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index cd44030d1..6205fe48a 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -27,6 +27,7 @@ import ( "strings" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/stacktrace" "go.uber.org/zap/zapcore" ) @@ -173,7 +174,8 @@ func (log *Logger) WithOptions(opts ...Option) *Logger { } // With creates a child logger and adds structured context to it. Fields added -// to the child don't affect the parent, and vice versa. +// to the child don't affect the parent, and vice versa. Any fields that +// require evaluation (such as Objects) are evaluated upon invocation of With. func (log *Logger) With(fields ...Field) *Logger { if len(fields) == 0 { return log @@ -183,6 +185,28 @@ func (log *Logger) With(fields ...Field) *Logger { return l } +// WithLazy creates a child logger and adds structured context to it lazily. +// +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// WithLazy provides a worthwhile performance optimization for contextual loggers +// when the likelihood of using the child logger is low, +// such as error paths and rarely taken branches. +// +// Similar to [With], fields added to the child don't affect the parent, and vice versa. +func (log *Logger) WithLazy(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewLazyWith(core, fields) + })) +} + // Level reports the minimum enabled level for this logger. // // For NopLoggers, this is [zapcore.InvalidLevel]. @@ -199,6 +223,8 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Log logs a message at the specified level. The message includes any fields // passed at the log site, as well as any fields accumulated on the logger. +// Any Fields that require evaluation (such as Objects) are evaluated upon +// invocation of Log. func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { if ce := log.check(lvl, msg); ce != nil { ce.Write(fields...) @@ -281,9 +307,15 @@ func (log *Logger) Core() zapcore.Core { return log.core } +// Name returns the Logger's underlying name, +// or an empty string if the logger is unnamed. +func (log *Logger) Name() string { + return log.name +} + func (log *Logger) clone() *Logger { - copy := *log - return &copy + clone := *log + return &clone } func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { @@ -354,17 +386,17 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Adding the caller or stack trace requires capturing the callers of // this function. We'll share information between these two.
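A short sketch contrasting With with the WithLazy method added above. The handle and doWork helpers and the request_id field are placeholders; the point is only that WithLazy defers encoding the field until the child logger is actually used:

package main

import (
	"errors"

	"go.uber.org/zap"
)

// doWork stands in for the real request handling.
func doWork() error { return errors.New("boom") }

func handle(logger *zap.Logger, requestID string) {
	// With: request_id is encoded into the child logger right away.
	eager := logger.With(zap.String("request_id", requestID))

	// WithLazy: the field is retained, but only evaluated if this child
	// logs or is chained further, which is cheaper on paths that usually
	// return without logging.
	lazy := logger.WithLazy(zap.String("request_id", requestID))

	if err := doWork(); err != nil {
		lazy.Error("work failed", zap.Error(err)) // fields materialize here
		return
	}
	eager.Info("work done")
}

func main() {
	handle(zap.NewExample(), "req-123")
}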
- stackDepth := stacktraceFirst + stackDepth := stacktrace.First if addStack { - stackDepth = stacktraceFull + stackDepth = stacktrace.Full } - stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) + stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth) defer stack.Free() if stack.Count() == 0 { if log.addCaller { fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) - log.errorOutput.Sync() + _ = log.errorOutput.Sync() } return ce } @@ -385,7 +417,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { buffer := bufferpool.Get() defer buffer.Free() - stackfmt := newStackFormatter(buffer) + stackfmt := stacktrace.NewFormatter(buffer) // We've already extracted the first frame, so format that // separately and defer to stackfmt for the rest. diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go index 478c9a10f..499772a00 100644 --- a/vendor/go.uber.org/zap/sink.go +++ b/vendor/go.uber.org/zap/sink.go @@ -66,7 +66,8 @@ func newSinkRegistry() *sinkRegistry { factories: make(map[string]func(*url.URL) (Sink, error)), openFile: os.OpenFile, } - sr.RegisterSink(schemeFile, sr.newFileSinkFromURL) + // Infallible operation: the registry is empty, so we can't have a conflict. + _ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL) return sr } @@ -154,7 +155,7 @@ func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) { case "stderr": return nopCloserSink{os.Stderr}, nil } - return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) + return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666) } func normalizeScheme(s string) (string, error) { diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index ac387b3e4..00ac5fe3a 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -122,74 +122,88 @@ func (s *SugaredLogger) Level() zapcore.Level { return zapcore.LevelOf(s.base.core) } -// Debug uses fmt.Sprint to construct and log a message. +// Debug logs the provided arguments at [DebugLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { s.log(DebugLevel, "", args, nil) } -// Info uses fmt.Sprint to construct and log a message. +// Info logs the provided arguments at [InfoLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Info(args ...interface{}) { s.log(InfoLevel, "", args, nil) } -// Warn uses fmt.Sprint to construct and log a message. +// Warn logs the provided arguments at [WarnLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Warn(args ...interface{}) { s.log(WarnLevel, "", args, nil) } -// Error uses fmt.Sprint to construct and log a message. +// Error logs the provided arguments at [ErrorLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Error(args ...interface{}) { s.log(ErrorLevel, "", args, nil) } -// DPanic uses fmt.Sprint to construct and log a message. In development, the -// logger then panics. (See DPanicLevel for details.) +// DPanic logs the provided arguments at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are added between arguments when neither is a string. 
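The sink registry changes above keep the same public entry points: custom destinations are still added with zap.RegisterSink and resolved by scheme when zap.Open is given a URL. A sketch of registering a custom scheme; the console scheme name and the stderr-backed sink are illustrative only:

package main

import (
	"net/url"
	"os"

	"go.uber.org/zap"
)

// stderrSink satisfies zap.Sink (a zapcore.WriteSyncer plus io.Closer) by
// wrapping *os.File and making Close a no-op so zap never closes real stderr.
type stderrSink struct{ *os.File }

func (stderrSink) Close() error { return nil }

func main() {
	// Register the scheme once, typically during program initialization.
	if err := zap.RegisterSink("console", func(*url.URL) (zap.Sink, error) {
		return stderrSink{os.Stderr}, nil
	}); err != nil {
		panic(err)
	}

	// Open dispatches on the URL scheme to the registered factory.
	ws, cleanup, err := zap.Open("console://")
	if err != nil {
		panic(err)
	}
	defer cleanup()
	_, _ = ws.Write([]byte("hello from a custom sink\n"))
}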
func (s *SugaredLogger) DPanic(args ...interface{}) { s.log(DPanicLevel, "", args, nil) } -// Panic uses fmt.Sprint to construct and log a message, then panics. +// Panic constructs a message with the provided arguments and panics. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Panic(args ...interface{}) { s.log(PanicLevel, "", args, nil) } -// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +// Fatal constructs a message with the provided arguments and calls os.Exit. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } -// Debugf uses fmt.Sprintf to log a templated message. +// Debugf formats the message according to the format specifier +// and logs it at [DebugLevel]. func (s *SugaredLogger) Debugf(template string, args ...interface{}) { s.log(DebugLevel, template, args, nil) } -// Infof uses fmt.Sprintf to log a templated message. +// Infof formats the message according to the format specifier +// and logs it at [InfoLevel]. func (s *SugaredLogger) Infof(template string, args ...interface{}) { s.log(InfoLevel, template, args, nil) } -// Warnf uses fmt.Sprintf to log a templated message. +// Warnf formats the message according to the format specifier +// and logs it at [WarnLevel]. func (s *SugaredLogger) Warnf(template string, args ...interface{}) { s.log(WarnLevel, template, args, nil) } -// Errorf uses fmt.Sprintf to log a templated message. +// Errorf formats the message according to the format specifier +// and logs it at [ErrorLevel]. func (s *SugaredLogger) Errorf(template string, args ...interface{}) { s.log(ErrorLevel, template, args, nil) } -// DPanicf uses fmt.Sprintf to log a templated message. In development, the -// logger then panics. (See DPanicLevel for details.) +// DPanicf formats the message according to the format specifier +// and logs it at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { s.log(DPanicLevel, template, args, nil) } -// Panicf uses fmt.Sprintf to log a templated message, then panics. +// Panicf formats the message according to the format specifier +// and panics. func (s *SugaredLogger) Panicf(template string, args ...interface{}) { s.log(PanicLevel, template, args, nil) } -// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +// Fatalf formats the message according to the format specifier +// and calls os.Exit. func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } @@ -241,38 +255,45 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } -// Debugln uses fmt.Sprintln to construct and log a message. +// Debugln logs a message at [DebugLevel]. +// Spaces are always added between arguments. func (s *SugaredLogger) Debugln(args ...interface{}) { s.logln(DebugLevel, args, nil) } -// Infoln uses fmt.Sprintln to construct and log a message. +// Infoln logs a message at [InfoLevel]. +// Spaces are always added between arguments. func (s *SugaredLogger) Infoln(args ...interface{}) { s.logln(InfoLevel, args, nil) } -// Warnln uses fmt.Sprintln to construct and log a message. +// Warnln logs a message at [WarnLevel]. +// Spaces are always added between arguments. 
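The reworded SugaredLogger comments above distinguish three calling conventions; a quick sketch of the difference, using the example logger:

package main

import "go.uber.org/zap"

func main() {
	sugar := zap.NewExample().Sugar()
	defer func() { _ = sugar.Sync() }()

	// Print-style: spaces are added only between operands when neither
	// is a string (fmt.Sprint semantics).
	sugar.Info("processed", 42, "records")

	// Printf-style: the message is built from a format specifier.
	sugar.Infof("processed %d records", 42)

	// Println-style: spaces are always added between arguments.
	sugar.Infoln("processed", 42, "records")

	// Loosely typed key-value pairs; the strongly typed Logger with
	// zap.Int/zap.String remains the cheaper option on hot paths.
	sugar.Infow("processed", "count", 42)
}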
func (s *SugaredLogger) Warnln(args ...interface{}) { s.logln(WarnLevel, args, nil) } -// Errorln uses fmt.Sprintln to construct and log a message. +// Errorln logs a message at [ErrorLevel]. +// Spaces are always added between arguments. func (s *SugaredLogger) Errorln(args ...interface{}) { s.logln(ErrorLevel, args, nil) } -// DPanicln uses fmt.Sprintln to construct and log a message. In development, the -// logger then panics. (See DPanicLevel for details.) +// DPanicln logs a message at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are always added between arguments. func (s *SugaredLogger) DPanicln(args ...interface{}) { s.logln(DPanicLevel, args, nil) } -// Panicln uses fmt.Sprintln to construct and log a message, then panics. +// Panicln logs a message at [PanicLevel] and panics. +// Spaces are always added between arguments. func (s *SugaredLogger) Panicln(args ...interface{}) { s.logln(PanicLevel, args, nil) } -// Fatalln uses fmt.Sprintln to construct and log a message, then calls os.Exit. +// Fatalln logs a message at [FatalLevel] and calls os.Exit. +// Spaces are always added between arguments. func (s *SugaredLogger) Fatalln(args ...interface{}) { s.logln(FatalLevel, args, nil) } diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go index f08728e1e..06768c679 100644 --- a/vendor/go.uber.org/zap/writer.go +++ b/vendor/go.uber.org/zap/writer.go @@ -48,21 +48,21 @@ import ( // os.Stdout and os.Stderr. When specified without a scheme, relative file // paths also work. func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { - writers, close, err := open(paths) + writers, closeAll, err := open(paths) if err != nil { return nil, nil, err } writer := CombineWriteSyncers(writers...) 
- return writer, close, nil + return writer, closeAll, nil } func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { writers := make([]zapcore.WriteSyncer, 0, len(paths)) closers := make([]io.Closer, 0, len(paths)) - close := func() { + closeAll := func() { for _, c := range closers { - c.Close() + _ = c.Close() } } @@ -77,11 +77,11 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { closers = append(closers, sink) } if openErr != nil { - close() + closeAll() return nil, nil, openErr } - return writers, close, nil + return writers, closeAll, nil } // CombineWriteSyncers is a utility that combines multiple WriteSyncers into a diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index 1aa5dc364..8ca0bfaf5 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -22,20 +22,20 @@ package zapcore import ( "fmt" - "sync" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) -var _sliceEncoderPool = sync.Pool{ - New: func() interface{} { - return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} - }, -} +var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder { + return &sliceArrayEncoder{ + elems: make([]interface{}, 0, 2), + } +}) func getSliceEncoder() *sliceArrayEncoder { - return _sliceEncoderPool.Get().(*sliceArrayEncoder) + return _sliceEncoderPool.Get() } func putSliceEncoder(e *sliceArrayEncoder) { diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go index 9dfd64051..776e93f6f 100644 --- a/vendor/go.uber.org/zap/zapcore/core.go +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -102,9 +102,9 @@ func (c *ioCore) Write(ent Entry, fields []Field) error { return err } if ent.Level > ErrorLevel { - // Since we may be crashing the program, sync the output. Ignore Sync - // errors, pending a clean solution to issue #370. - c.Sync() + // Since we may be crashing the program, sync the output. + // Ignore Sync errors, pending a clean solution to issue #370. + _ = c.Sync() } return nil } diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go index 9d326e95e..459a5d7ce 100644 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -24,25 +24,23 @@ import ( "fmt" "runtime" "strings" - "sync" "time" "go.uber.org/multierr" "go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/exit" + "go.uber.org/zap/internal/pool" ) -var ( - _cePool = sync.Pool{New: func() interface{} { - // Pre-allocate some space for cores. - return &CheckedEntry{ - cores: make([]Core, 4), - } - }} -) +var _cePool = pool.New(func() *CheckedEntry { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } +}) func getCheckedEntry() *CheckedEntry { - ce := _cePool.Get().(*CheckedEntry) + ce := _cePool.Get() ce.reset() return ce } @@ -244,7 +242,7 @@ func (ce *CheckedEntry) Write(fields ...Field) { // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. 
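For the writer.go change above: zap.Open still returns a combined WriteSyncer plus a cleanup function (now called closeAll internally), and callers remain responsible for invoking that function when the logger is torn down. A usage sketch; the /tmp/app.log path is arbitrary:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// "stderr" and "stdout" are special-cased; other paths are opened as files.
	ws, cleanup, err := zap.Open("stderr", "/tmp/app.log")
	if err != nil {
		panic(err)
	}
	defer cleanup() // closes every file Open created; Close errors are discarded

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zap.InfoLevel,
	)
	zap.New(core).Info("writing to stderr and /tmp/app.log")
}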
fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) - ce.ErrorOutput.Sync() + _ = ce.ErrorOutput.Sync() // ignore error } return } @@ -256,7 +254,7 @@ func (ce *CheckedEntry) Write(fields ...Field) { } if err != nil && ce.ErrorOutput != nil { fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) - ce.ErrorOutput.Sync() + _ = ce.ErrorOutput.Sync() // ignore error } hook := ce.after diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go index 06359907a..c40df1326 100644 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -23,7 +23,8 @@ package zapcore import ( "fmt" "reflect" - "sync" + + "go.uber.org/zap/internal/pool" ) // Encodes the given error into fields of an object. A field with the given @@ -97,15 +98,18 @@ func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { } el := newErrArrayElem(errs[i]) - arr.AppendObject(el) + err := arr.AppendObject(el) el.Free() + if err != nil { + return err + } } return nil } -var _errArrayElemPool = sync.Pool{New: func() interface{} { +var _errArrayElemPool = pool.New(func() *errArrayElem { return &errArrayElem{} -}} +}) // Encodes any error into a {"error": ...} re-using the same errors logic. // @@ -113,7 +117,7 @@ var _errArrayElemPool = sync.Pool{New: func() interface{} { type errArrayElem struct{ err error } func newErrArrayElem(err error) *errArrayElem { - e := _errArrayElemPool.Get().(*errArrayElem) + e := _errArrayElemPool.Get() e.err = err return e } diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 3921c5cd3..c8ab86979 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -23,24 +23,20 @@ package zapcore import ( "encoding/base64" "math" - "sync" "time" "unicode/utf8" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) // For JSON-escaping; see jsonEncoder.safeAddString below. const _hex = "0123456789abcdef" -var _jsonPool = sync.Pool{New: func() interface{} { +var _jsonPool = pool.New(func() *jsonEncoder { return &jsonEncoder{} -}} - -func getJSONEncoder() *jsonEncoder { - return _jsonPool.Get().(*jsonEncoder) -} +}) func putJSONEncoder(enc *jsonEncoder) { if enc.reflectBuf != nil { @@ -354,7 +350,7 @@ func (enc *jsonEncoder) Clone() Encoder { } func (enc *jsonEncoder) clone() *jsonEncoder { - clone := getJSONEncoder() + clone := _jsonPool.Get() clone.EncoderConfig = enc.EncoderConfig clone.spaced = enc.spaced clone.openNamespaces = enc.openNamespaces @@ -490,73 +486,98 @@ func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { // Unlike the standard library's encoder, it doesn't attempt to protect the // user from browser vulnerabilities or JSONP-related problems. func (enc *jsonEncoder) safeAddString(s string) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRuneInString(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.AppendString(s[i : i+size]) - i += size - } + safeAppendStringLike( + (*buffer.Buffer).AppendString, + utf8.DecodeRuneInString, + enc.buf, + s, + ) } // safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. 
func (enc *jsonEncoder) safeAddByteString(s []byte) { + safeAppendStringLike( + (*buffer.Buffer).AppendBytes, + utf8.DecodeRune, + enc.buf, + s, + ) +} + +// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString. +// It appends a string or byte slice to the buffer, escaping all special characters. +func safeAppendStringLike[S []byte | string]( + // appendTo appends this string-like object to the buffer. + appendTo func(*buffer.Buffer, S), + // decodeRune decodes the next rune from the string-like object + // and returns its value and width in bytes. + decodeRune func(S) (rune, int), + buf *buffer.Buffer, + s S, +) { + // The encoding logic below works by skipping over characters + // that can be safely copied as-is, + // until a character is found that needs special handling. + // At that point, we copy everything we've seen so far, + // and then handle that special character. + // + // last is the index of the last byte that was copied to the buffer. + last := 0 for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { + if s[i] >= utf8.RuneSelf { + // Character >= RuneSelf may be part of a multi-byte rune. + // They need to be decoded before we can decide how to handle them. + r, size := decodeRune(s[i:]) + if r != utf8.RuneError || size != 1 { + // No special handling required. + // Skip over this rune and continue. + i += size + continue + } + + // Invalid UTF-8 sequence. + // Replace it with the Unicode replacement character. + appendTo(buf, s[last:i]) + buf.AppendString(`\ufffd`) + i++ - continue - } - r, size := utf8.DecodeRune(s[i:]) - if enc.tryAddRuneError(r, size) { + last = i + } else { + // Character < RuneSelf is a single-byte UTF-8 rune. + if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' { + // No escaping necessary. + // Skip over this character and continue. + i++ + continue + } + + // This character needs to be escaped. + appendTo(buf, s[last:i]) + switch s[i] { + case '\\', '"': + buf.AppendByte('\\') + buf.AppendByte(s[i]) + case '\n': + buf.AppendByte('\\') + buf.AppendByte('n') + case '\r': + buf.AppendByte('\\') + buf.AppendByte('r') + case '\t': + buf.AppendByte('\\') + buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + buf.AppendString(`\u00`) + buf.AppendByte(_hex[s[i]>>4]) + buf.AppendByte(_hex[s[i]&0xF]) + } + i++ - continue + last = i } - enc.buf.Write(s[i : i+size]) - i += size - } -} - -// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. -func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { - if b >= utf8.RuneSelf { - return false - } - if 0x20 <= b && b != '\\' && b != '"' { - enc.buf.AppendByte(b) - return true - } - switch b { - case '\\', '"': - enc.buf.AppendByte('\\') - enc.buf.AppendByte(b) - case '\n': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('n') - case '\r': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('r') - case '\t': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('t') - default: - // Encode bytes < 0x20, except for the escape sequences above. 
- enc.buf.AppendString(`\u00`) - enc.buf.AppendByte(_hex[b>>4]) - enc.buf.AppendByte(_hex[b&0xF]) } - return true -} -func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { - if r == utf8.RuneError && size == 1 { - enc.buf.AppendString(`\ufffd`) - return true - } - return false + // add remaining + appendTo(buf, s[last:]) } diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go similarity index 60% rename from vendor/go.uber.org/atomic/time.go rename to vendor/go.uber.org/zap/zapcore/lazy_with.go index 1660feb14..05288d6a8 100644 --- a/vendor/go.uber.org/atomic/time.go +++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go @@ -1,6 +1,4 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,36 +18,37 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package atomic - -import ( - "time" -) +package zapcore -// Time is an atomic type-safe wrapper for time.Time values. -type Time struct { - _ nocmp // disallow non-atomic comparison +import "sync" - v Value +type lazyWithCore struct { + Core + sync.Once + fields []Field } -var _zeroTime time.Time - -// NewTime creates a new Time. -func NewTime(val time.Time) *Time { - x := &Time{} - if val != _zeroTime { - x.Store(val) +// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if +// the logger is written to (or is further chained in a lon-lazy manner). +func NewLazyWith(core Core, fields []Field) Core { + return &lazyWithCore{ + Core: core, + fields: fields, } - return x } -// Load atomically loads the wrapped time.Time. -func (x *Time) Load() time.Time { - return unpackTime(x.v.Load()) +func (d *lazyWithCore) initOnce() { + d.Once.Do(func() { + d.Core = d.Core.With(d.fields) + }) +} + +func (d *lazyWithCore) With(fields []Field) Core { + d.initOnce() + return d.Core.With(fields) } -// Store atomically stores the passed time.Time. -func (x *Time) Store(val time.Time) { - x.v.Store(packTime(val)) +func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry { + d.initOnce() + return d.Core.Check(e, ce) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go index dc518055a..b7c093a4f 100644 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -21,9 +21,8 @@ package zapcore import ( + "sync/atomic" "time" - - "go.uber.org/atomic" ) const ( @@ -66,16 +65,16 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { tn := t.UnixNano() resetAfter := c.resetAt.Load() if resetAfter > tn { - return c.counter.Inc() + return c.counter.Add(1) } c.counter.Store(1) newResetAfter := tn + tick.Nanoseconds() - if !c.resetAt.CAS(resetAfter, newResetAfter) { + if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) { // We raced with another goroutine trying to reset, and it also reset // the counter to 1, so we need to reincrement the counter. 
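The sampler above now leans on the standard library's atomic types; the reset-or-increment pattern in IncCheckReset can be sketched on its own as follows. The windowCounter type is a simplified stand-in for zapcore's unexported counter:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// windowCounter counts events per fixed time window using the same
// compare-and-swap reset trick as zapcore's sampler counter.
type windowCounter struct {
	resetAt atomic.Int64  // unix nanoseconds at which the current window ends
	counter atomic.Uint64 // events seen in the current window
}

func (c *windowCounter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
	tn := t.UnixNano()
	resetAfter := c.resetAt.Load()
	if resetAfter > tn {
		// Still inside the current window: just count.
		return c.counter.Add(1)
	}

	// Window expired: restart the count and try to advance the window.
	c.counter.Store(1)
	newResetAfter := tn + tick.Nanoseconds()
	if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) {
		// Another goroutine won the race and already reset the count to 1,
		// so record this event on top of its reset.
		return c.counter.Add(1)
	}
	return 1
}

func main() {
	var c windowCounter
	for i := 0; i < 3; i++ {
		fmt.Println(c.IncCheckReset(time.Now(), time.Second)) // 1, 2, 3
	}
}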
- return c.counter.Inc() + return c.counter.Add(1) } return 1 diff --git a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go index 71ca30b51..6823773b7 100644 --- a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go +++ b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go @@ -30,10 +30,10 @@ import ( // See https://github.com/grpc/grpc-go/blob/v1.35.0/grpclog/loggerv2.go#L77-L86 const ( - grpcLvlInfo = 0 - grpcLvlWarn = 1 - grpcLvlError = 2 - grpcLvlFatal = 3 + grpcLvlInfo int = iota + grpcLvlWarn + grpcLvlError + grpcLvlFatal ) var ( diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go index 8473fb792..405183098 100644 --- a/vendor/golang.org/x/sync/singleflight/singleflight.go +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -31,6 +31,15 @@ func (p *panicError) Error() string { return fmt.Sprintf("%v\n\n%s", p.value, p.stack) } +func (p *panicError) Unwrap() error { + err, ok := p.value.(error) + if !ok { + return nil + } + + return err +} + func newPanicError(v interface{}) error { stack := debug.Stack() diff --git a/vendor/modules.txt b/vendor/modules.txt index bdab69616..15d74c63b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -505,11 +505,10 @@ go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 # go.uber.org/atomic v1.10.0 ## explicit; go 1.18 -go.uber.org/atomic # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.24.0 +# go.uber.org/zap v1.26.0 ## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer @@ -517,6 +516,8 @@ go.uber.org/zap/internal go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit +go.uber.org/zap/internal/pool +go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc # golang.org/x/crypto v0.14.0 @@ -553,8 +554,8 @@ golang.org/x/net/websocket ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.2.0 -## explicit +# golang.org/x/sync v0.5.0 +## explicit; go 1.18 golang.org/x/sync/singleflight # golang.org/x/sys v0.13.0 ## explicit; go 1.17 @@ -1444,7 +1445,7 @@ open-cluster-management.io/addon-framework/pkg/index open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner open-cluster-management.io/addon-framework/pkg/utils -# open-cluster-management.io/api v0.12.1-0.20231027024433-bab1208e6889 +# open-cluster-management.io/api v0.12.1-0.20231109164634-c10ed7e097aa ## explicit; go 1.19 open-cluster-management.io/api/addon/v1alpha1 open-cluster-management.io/api/client/addon/clientset/versioned diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml index a42b5f57c..e08074e70 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml @@ -145,24 +145,69 @@ spec: defined in ClusterManagementAddOn. 
properties: all: - description: All define required fields for RolloutStrategy + description: All defines required fields for RolloutStrategy type All properties: + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number + of clusters in the current rollout that can fail + before proceeding to the next rollout. MaxFailures + is only considered for rollout types Progressive + and ProgressivePerGroup. For Progressive, this + is considered over the total number of clusters. + For ProgressivePerGroup, this is considered according + to the size of the current group. For both Progressive + and ProgressivePerGroup, the MaxFailures does + not apply for MandatoryDecisionGroups, which tolerate + no failures. Default is that no failures are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. In + other words, the minimum amount of time the workload + applier controller will wait from the start of + each rollout before proceeding (assuming a successful + state has been reached and MaxFailures wasn't + breached). MinSuccessTime is only considered for + rollout types Progressive and ProgressivePerGroup. + The default value is 0 meaning the workload applier + proceeds immediately after a successful state + is reached. MinSuccessTime must be defined in + [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload + to reach a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload + applier will wait for a successful state indefinitely. + ProgressDeadline must be defined in [0-9h]|[0-9m]|[0-9s] + format examples; 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is - None meaning the workload applier will not proceed - apply workload to other clusters if did not reach - the successful state. Timeout must be defined - in [0-9h]|[0-9m]|[0-9s] format examples; 2h , - 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload + reaches a successful state in the cluster. Timeout + default value is None meaning the workload applier + will not proceed apply workload to other clusters + if did not reach the successful state. Timeout + must be defined in [0-9h]|[0-9m]|[0-9s] format + examples; 2h , 90m , 360s \n Deprecated: Use ProgressDeadline + instead." pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object progressive: - description: Progressive define required fields for + description: Progressive defines required fields for RolloutStrategy type Progressive properties: mandatoryDecisionGroups: @@ -200,21 +245,66 @@ spec: placement->DecisionStrategy. pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ x-kubernetes-int-or-string: true + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number + of clusters in the current rollout that can fail + before proceeding to the next rollout. MaxFailures + is only considered for rollout types Progressive + and ProgressivePerGroup. 
For Progressive, this + is considered over the total number of clusters. + For ProgressivePerGroup, this is considered according + to the size of the current group. For both Progressive + and ProgressivePerGroup, the MaxFailures does + not apply for MandatoryDecisionGroups, which tolerate + no failures. Default is that no failures are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. In + other words, the minimum amount of time the workload + applier controller will wait from the start of + each rollout before proceeding (assuming a successful + state has been reached and MaxFailures wasn't + breached). MinSuccessTime is only considered for + rollout types Progressive and ProgressivePerGroup. + The default value is 0 meaning the workload applier + proceeds immediately after a successful state + is reached. MinSuccessTime must be defined in + [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload + to reach a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload + applier will wait for a successful state indefinitely. + ProgressDeadline must be defined in [0-9h]|[0-9m]|[0-9s] + format examples; 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is - None meaning the workload applier will not proceed - apply workload to other clusters if did not reach - the successful state. Timeout must be defined - in [0-9h]|[0-9m]|[0-9s] format examples; 2h , - 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload + reaches a successful state in the cluster. Timeout + default value is None meaning the workload applier + will not proceed apply workload to other clusters + if did not reach the successful state. Timeout + must be defined in [0-9h]|[0-9m]|[0-9s] format + examples; 2h , 90m , 360s \n Deprecated: Use ProgressDeadline + instead." pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object progressivePerGroup: - description: ProgressivePerGroup define required fields + description: ProgressivePerGroup defines required fields for RolloutStrategy type ProgressivePerGroup properties: mandatoryDecisionGroups: @@ -241,33 +331,66 @@ spec: type: string type: object type: array + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number + of clusters in the current rollout that can fail + before proceeding to the next rollout. MaxFailures + is only considered for rollout types Progressive + and ProgressivePerGroup. For Progressive, this + is considered over the total number of clusters. + For ProgressivePerGroup, this is considered according + to the size of the current group. For both Progressive + and ProgressivePerGroup, the MaxFailures does + not apply for MandatoryDecisionGroups, which tolerate + no failures. Default is that no failures are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. 
In + other words, the minimum amount of time the workload + applier controller will wait from the start of + each rollout before proceeding (assuming a successful + state has been reached and MaxFailures wasn't + breached). MinSuccessTime is only considered for + rollout types Progressive and ProgressivePerGroup. + The default value is 0 meaning the workload applier + proceeds immediately after a successful state + is reached. MinSuccessTime must be defined in + [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload + to reach a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload + applier will wait for a successful state indefinitely. + ProgressDeadline must be defined in [0-9h]|[0-9m]|[0-9s] + format examples; 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is - None meaning the workload applier will not proceed - apply workload to other clusters if did not reach - the successful state. Timeout must be defined - in [0-9h]|[0-9m]|[0-9s] format examples; 2h , - 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload + reaches a successful state in the cluster. Timeout + default value is None meaning the workload applier + will not proceed apply workload to other clusters + if did not reach the successful state. Timeout + must be defined in [0-9h]|[0-9m]|[0-9s] format + examples; 2h , 90m , 360s \n Deprecated: Use ProgressDeadline + instead." pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object type: default: All - description: Rollout strategy Types are All, Progressive - and ProgressivePerGroup 1) All means apply the workload - to all clusters in the decision groups at once. 2) - Progressive means apply the workload to the selected - clusters progressively per cluster. The workload will - not be applied to the next cluster unless one of the - current applied clusters reach the successful state - or timeout. 3) ProgressivePerGroup means apply the - workload to decisionGroup clusters progressively per - group. The workload will not be applied to the next - decisionGroup unless all clusters in the current group - reach the successful state or timeout. enum: - All - Progressive diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go index 7af8bd1b7..8aaa6ab31 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go +++ b/vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go @@ -46,13 +46,16 @@ type ClusterRolloutStatus struct { // Status is the required field indicating the rollout status. Status RolloutStatus // LastTransitionTime is the last transition time of the rollout status (optional field). - // Used to calculate timeout for progressing and failed status. + // Used to calculate timeout for progressing and failed status and minimum success time (i.e. soak + // time) for succeeded status. LastTransitionTime *metav1.Time // TimeOutTime is the timeout time when the status is progressing or failed (optional field). 
TimeOutTime *metav1.Time } -// RolloutResult contains list of clusters that are timeOut, removed and required to rollOut +// RolloutResult contains list of clusters that are timeOut, removed and required to rollOut. A +// boolean is also provided signaling that the rollout may be shortened due to the number of failed +// clusters exceeding the MaxFailure threshold. type RolloutResult struct { // ClustersToRollout is a slice of ClusterRolloutStatus that will be rolled out. ClustersToRollout []ClusterRolloutStatus @@ -60,6 +63,8 @@ type RolloutResult struct { ClustersTimeOut []ClusterRolloutStatus // ClustersRemoved is a slice of ClusterRolloutStatus that are removed. ClustersRemoved []ClusterRolloutStatus + // MaxFailureBreach is a boolean signaling whether the rollout was cut short because of failed clusters. + MaxFailureBreach bool } // ClusterRolloutStatusFunc defines a function that return the rollout status for a given workload. @@ -75,7 +80,7 @@ type RolloutHandler[T any] struct { statusFunc ClusterRolloutStatusFunc[T] } -// NewRolloutHandler creates a new RolloutHandler with the give workload type. +// NewRolloutHandler creates a new RolloutHandler with the given workload type. func NewRolloutHandler[T any](pdTracker *clusterv1beta1.PlacementDecisionClustersTracker, statusFunc ClusterRolloutStatusFunc[T]) (*RolloutHandler[T], error) { if pdTracker == nil { return nil, fmt.Errorf("invalid placement decision tracker %v", pdTracker) @@ -84,7 +89,7 @@ func NewRolloutHandler[T any](pdTracker *clusterv1beta1.PlacementDecisionCluster return &RolloutHandler[T]{pdTracker: pdTracker, statusFunc: statusFunc}, nil } -// The input are a RolloutStrategy and existingClusterRolloutStatus list. +// The inputs are a RolloutStrategy and existingClusterRolloutStatus list. // The existing ClusterRolloutStatus list should be created using the ClusterRolloutStatusFunc to determine the current workload rollout status. // The existing ClusterRolloutStatus list should contain all the current workloads rollout status such as ToApply, Progressing, Succeeded, // Failed, TimeOut and Skip in order to determine the added, removed, timeout clusters and next clusters to rollout. 
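To make the new strategy knobs concrete, here is a hedged sketch of populating a Progressive rollout with the fields the helpers above read (MaxConcurrency, MaxFailures, MinSuccessTime, ProgressDeadline); the populated value would sit in the Progressive member of a RolloutStrategy whose Type is Progressive. The exact struct layout is an assumption (some of these fields may be promoted from an embedded config), so the sketch assigns fields rather than using a composite literal, and it approximates what calculateRolloutSize does with apimachinery's generic percentage helper:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
)

func main() {
	var progressive clusterv1alpha1.RolloutProgressive

	// Roll out to at most 25% of the selected clusters at a time.
	progressive.MaxConcurrency = intstr.FromString("25%")
	// Tolerate up to 10% failed clusters before cutting the rollout short.
	progressive.MaxFailures = intstr.FromString("10%")
	// Soak each cluster for 10 minutes after it reports success.
	progressive.MinSuccessTime = metav1.Duration{Duration: 10 * time.Minute}
	// Count a cluster as failed if it has not succeeded within 30 minutes.
	progressive.ProgressDeadline = "30m"

	// Roughly what calculateRolloutSize does: resolve "10%" of, say, 42
	// selected clusters into an absolute failure budget.
	maxFailures, err := intstr.GetScaledValueFromIntOrPercent(&progressive.MaxFailures, 42, true)
	if err != nil {
		panic(err)
	}
	fmt.Printf("progressive=%+v maxFailures=%d\n", progressive, maxFailures)
}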
@@ -112,7 +117,7 @@ func (r *RolloutHandler[T]) getRolloutAllClusters(rolloutStrategy RolloutStrateg } // Parse timeout for the rollout - failureTimeout, err := parseTimeout(strategy.All.Timeout.Timeout) + failureTimeout, err := parseTimeout(strategy.All.ProgressDeadline) if err != nil { return &strategy, RolloutResult{}, err } @@ -122,7 +127,7 @@ func (r *RolloutHandler[T]) getRolloutAllClusters(rolloutStrategy RolloutStrateg // Check for removed Clusters currentClusterStatus, removedClusterStatus := r.getRemovedClusters(allClusterGroups, existingClusterStatus) - rolloutResult := progressivePerCluster(allClusterGroups, len(allClusters), failureTimeout, currentClusterStatus) + rolloutResult := progressivePerCluster(allClusterGroups, len(allClusters), len(allClusters), time.Duration(0), failureTimeout, currentClusterStatus) rolloutResult.ClustersRemoved = removedClusterStatus return &strategy, rolloutResult, nil @@ -135,9 +140,10 @@ func (r *RolloutHandler[T]) getProgressiveClusters(rolloutStrategy RolloutStrate if strategy.Progressive == nil { strategy.Progressive = &RolloutProgressive{} } + minSuccessTime := strategy.Progressive.MinSuccessTime.Duration // Parse timeout for non-mandatory decision groups - failureTimeout, err := parseTimeout(strategy.Progressive.Timeout.Timeout) + failureTimeout, err := parseTimeout(strategy.Progressive.ProgressDeadline) if err != nil { return &strategy, RolloutResult{}, err } @@ -146,13 +152,21 @@ func (r *RolloutHandler[T]) getProgressiveClusters(rolloutStrategy RolloutStrate clusterGroups := r.pdTracker.ExistingClusterGroupsBesides() currentClusterStatus, removedClusterStatus := r.getRemovedClusters(clusterGroups, existingClusterStatus) + // Parse maximum failure threshold for continuing the rollout, defaulting to zero + maxFailures, err := calculateRolloutSize(strategy.Progressive.MaxFailures, len(clusterGroups.GetClusters()), 0) + if err != nil { + return &strategy, RolloutResult{}, fmt.Errorf("failed to parse the provided maxFailures: %w", err) + } + // Upgrade mandatory decision groups first groupKeys := decisionGroupsToGroupKeys(strategy.Progressive.MandatoryDecisionGroups.MandatoryDecisionGroups) clusterGroups = r.pdTracker.ExistingClusterGroups(groupKeys...) - // Perform progressive rollOut for mandatory decision groups first. + // Perform progressive rollOut for mandatory decision groups first, tolerating no failures if len(clusterGroups) > 0 { - rolloutResult := progressivePerGroup(clusterGroups, failureTimeout, currentClusterStatus) + rolloutResult := progressivePerGroup( + clusterGroups, intstr.FromInt(0), minSuccessTime, failureTimeout, currentClusterStatus, + ) if len(rolloutResult.ClustersToRollout) > 0 || len(rolloutResult.ClustersTimeOut) > 0 { rolloutResult.ClustersRemoved = removedClusterStatus return &strategy, rolloutResult, nil @@ -162,13 +176,13 @@ func (r *RolloutHandler[T]) getProgressiveClusters(rolloutStrategy RolloutStrate // Calculate the size of progressive rollOut // If the MaxConcurrency not defined, total clusters length is considered as maxConcurrency. clusterGroups = r.pdTracker.ExistingClusterGroupsBesides(groupKeys...) 
- length, err := calculateRolloutSize(strategy.Progressive.MaxConcurrency, len(clusterGroups.GetClusters())) + rolloutSize, err := calculateRolloutSize(strategy.Progressive.MaxConcurrency, len(clusterGroups.GetClusters()), len(clusterGroups.GetClusters())) if err != nil { - return &strategy, RolloutResult{}, err + return &strategy, RolloutResult{}, fmt.Errorf("failed to parse the provided maxConcurrency: %w", err) } // Rollout the remaining clusters - rolloutResult := progressivePerCluster(clusterGroups, length, failureTimeout, currentClusterStatus) + rolloutResult := progressivePerCluster(clusterGroups, rolloutSize, maxFailures, minSuccessTime, failureTimeout, currentClusterStatus) rolloutResult.ClustersRemoved = removedClusterStatus return &strategy, rolloutResult, nil @@ -181,13 +195,21 @@ func (r *RolloutHandler[T]) getProgressivePerGroupClusters(rolloutStrategy Rollo if strategy.ProgressivePerGroup == nil { strategy.ProgressivePerGroup = &RolloutProgressivePerGroup{} } + minSuccessTime := rolloutStrategy.ProgressivePerGroup.MinSuccessTime.Duration + maxFailures := rolloutStrategy.ProgressivePerGroup.MaxFailures // Parse timeout for non-mandatory decision groups - failureTimeout, err := parseTimeout(strategy.ProgressivePerGroup.Timeout.Timeout) + failureTimeout, err := parseTimeout(strategy.ProgressivePerGroup.ProgressDeadline) if err != nil { return &strategy, RolloutResult{}, err } + // Check format of MaxFailures--this value will be re-parsed and used in progressivePerGroup() + err = parseRolloutSize(maxFailures) + if err != nil { + return &strategy, RolloutResult{}, fmt.Errorf("failed to parse the provided maxFailures: %w", err) + } + // Check for removed Clusters clusterGroups := r.pdTracker.ExistingClusterGroupsBesides() currentClusterStatus, removedClusterStatus := r.getRemovedClusters(clusterGroups, existingClusterStatus) @@ -197,9 +219,9 @@ func (r *RolloutHandler[T]) getProgressivePerGroupClusters(rolloutStrategy Rollo groupKeys := decisionGroupsToGroupKeys(mandatoryDecisionGroups) clusterGroups = r.pdTracker.ExistingClusterGroups(groupKeys...) - // Perform progressive rollout per group for mandatory decision groups first + // Perform progressive rollout per group for mandatory decision groups first, tolerating no failures if len(clusterGroups) > 0 { - rolloutResult := progressivePerGroup(clusterGroups, failureTimeout, currentClusterStatus) + rolloutResult := progressivePerGroup(clusterGroups, intstr.FromInt(0), minSuccessTime, failureTimeout, currentClusterStatus) if len(rolloutResult.ClustersToRollout) > 0 || len(rolloutResult.ClustersTimeOut) > 0 { rolloutResult.ClustersRemoved = removedClusterStatus @@ -211,7 +233,7 @@ func (r *RolloutHandler[T]) getProgressivePerGroupClusters(rolloutStrategy Rollo restClusterGroups := r.pdTracker.ExistingClusterGroupsBesides(groupKeys...) 
// Perform progressive rollout per group for the remaining decision groups - rolloutResult := progressivePerGroup(restClusterGroups, failureTimeout, currentClusterStatus) + rolloutResult := progressivePerGroup(restClusterGroups, maxFailures, minSuccessTime, failureTimeout, currentClusterStatus) rolloutResult.ClustersRemoved = removedClusterStatus return &strategy, rolloutResult, nil @@ -238,35 +260,78 @@ func (r *RolloutHandler[T]) getRemovedClusters(clusterGroupsMap clusterv1beta1.C return currentClusterStatus, removedClusterStatus } -func progressivePerCluster(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, length int, timeout time.Duration, existingClusterStatus []ClusterRolloutStatus) RolloutResult { +// progressivePerCluster parses the rollout status for the given clusters and returns the rollout +// result. It sorts the clusters alphabetically in order to determine the rollout groupings and the +// rollout group size is determined by the MaxConcurrency setting. +func progressivePerCluster( + clusterGroupsMap clusterv1beta1.ClusterGroupsMap, + rolloutSize int, + maxFailures int, + minSuccessTime time.Duration, + timeout time.Duration, + existingClusterStatus []ClusterRolloutStatus, +) RolloutResult { var rolloutClusters, timeoutClusters []ClusterRolloutStatus existingClusters := make(map[string]bool) + failureCount := 0 + failureBreach := false + + // Sort existing cluster status for consistency in case ToApply was determined by the workload applier + sort.Slice(existingClusterStatus, func(i, j int) bool { + return existingClusterStatus[i].ClusterName < existingClusterStatus[j].ClusterName + }) + // Collect current cluster status and determine any TimeOut statuses for _, status := range existingClusterStatus { if status.ClusterName == "" { continue } existingClusters[status.ClusterName] = true - rolloutClusters, timeoutClusters = determineRolloutStatus(status, timeout, rolloutClusters, timeoutClusters) - if len(rolloutClusters) >= length { + // If there was a breach of MaxFailures, only handle clusters that have already had workload applied + if !failureBreach || failureBreach && status.Status != ToApply { + rolloutClusters, timeoutClusters = determineRolloutStatus(&status, minSuccessTime, timeout, rolloutClusters, timeoutClusters) + } + + // Keep track of TimeOut or Failed clusters and check total against MaxFailures + if status.Status == TimeOut || status.Status == Failed { + failureCount++ + + failureBreach = failureCount > maxFailures + } + + // Return if the list of rollout clusters has reached the target rollout size + if len(rolloutClusters) >= rolloutSize { return RolloutResult{ ClustersToRollout: rolloutClusters, ClustersTimeOut: timeoutClusters, + MaxFailureBreach: failureBreach, } } } + if failureBreach { + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + MaxFailureBreach: failureBreach, + } + } + clusters := clusterGroupsMap.GetClusters().UnsortedList() clusterToGroupKey := clusterGroupsMap.ClusterToGroupKey() + // Sort the clusters in alphabetical order to ensure consistency. 
sort.Strings(clusters) + + // Append clusters to the rollout list until the target rollout size is reached for _, cluster := range clusters { if existingClusters[cluster] { continue } + // For clusters without a rollout status, set the status to ToApply status := ClusterRolloutStatus{ ClusterName: cluster, Status: ToApply, @@ -274,7 +339,8 @@ func progressivePerCluster(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, len } rolloutClusters = append(rolloutClusters, status) - if len(rolloutClusters) >= length { + // Return if the list of rollout clusters has reached the target rollout size + if len(rolloutClusters) >= rolloutSize { return RolloutResult{ ClustersToRollout: rolloutClusters, ClustersTimeOut: timeoutClusters, @@ -288,32 +354,49 @@ func progressivePerCluster(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, len } } -func progressivePerGroup(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, timeout time.Duration, existingClusterStatus []ClusterRolloutStatus) RolloutResult { +func progressivePerGroup( + clusterGroupsMap clusterv1beta1.ClusterGroupsMap, + maxFailures intstr.IntOrString, + minSuccessTime time.Duration, + timeout time.Duration, + existingClusterStatus []ClusterRolloutStatus, +) RolloutResult { var rolloutClusters, timeoutClusters []ClusterRolloutStatus - existingClusters := make(map[string]bool) + existingClusters := make(map[string]RolloutStatus) for _, status := range existingClusterStatus { if status.ClusterName == "" { continue } - if status.Status == ToApply { - // Set as false to consider the cluster in the decisionGroups iteration. - existingClusters[status.ClusterName] = false - } else { - existingClusters[status.ClusterName] = true - rolloutClusters, timeoutClusters = determineRolloutStatus(status, timeout, rolloutClusters, timeoutClusters) + // ToApply will be reconsidered in the decisionGroups iteration.
+ if status.Status != ToApply { + rolloutClusters, timeoutClusters = determineRolloutStatus(&status, minSuccessTime, timeout, rolloutClusters, timeoutClusters) + existingClusters[status.ClusterName] = status.Status } } + totalFailureCount := 0 + failureBreach := false clusterGroupKeys := clusterGroupsMap.GetOrderedGroupKeys() for _, key := range clusterGroupKeys { + groupFailureCount := 0 if subclusters, ok := clusterGroupsMap[key]; ok { + // Calculate the max failure threshold for the group--the returned error was checked + // previously, so it's ignored here + maxGroupFailures, _ := calculateRolloutSize(maxFailures, len(subclusters), 0) // Iterate through clusters in the group clusters := subclusters.UnsortedList() sort.Strings(clusters) for _, cluster := range clusters { - if existingClusters[cluster] { + if status, ok := existingClusters[cluster]; ok { + // Keep track of TimeOut or Failed clusters and check total against MaxFailures + if status == TimeOut || status == Failed { + groupFailureCount++ + + failureBreach = groupFailureCount > maxGroupFailures + } + continue } @@ -325,11 +408,15 @@ func progressivePerGroup(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, timeo rolloutClusters = append(rolloutClusters, status) } - // As it is perGroup Return if there are clusters to rollOut - if len(rolloutClusters) > 0 { + totalFailureCount += groupFailureCount + + // As it is perGroup, return if there are clusters to rollOut that aren't + // Failed/Timeout, or there was a breach of the MaxFailure configuration + if len(rolloutClusters)-totalFailureCount > 0 || failureBreach { return RolloutResult{ ClustersToRollout: rolloutClusters, ClustersTimeOut: timeoutClusters, + MaxFailureBreach: failureBreach, } } } @@ -338,39 +425,61 @@ func progressivePerGroup(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, timeo return RolloutResult{ ClustersToRollout: rolloutClusters, ClustersTimeOut: timeoutClusters, + MaxFailureBreach: failureBreach, } } -// determineRolloutStatus checks whether a cluster should continue its rollout based on its current status and timeout. -// The function update the cluster status and append it to the expected slice. +// determineRolloutStatus checks whether a cluster should continue its rollout based on its current +// status and timeout. The function updates the cluster status and appends it to the expected slice. +// Nothing is done for TimeOut or Skip statuses. +// +// The minSuccessTime parameter is utilized for handling succeeded clusters that are still within +// the configured soak time, in which case the cluster will be returned as a rolloutCluster. // -// The timeout parameter is utilized for handling progressing and failed statuses and any other unknown status: -// 1. If timeout is set to None (maxTimeDuration), the function will append the clusterStatus to the rollOut Clusters. +// The timeout parameter is utilized for handling progressing and failed statuses and any other +// unknown status: +// 1. If timeout is set to None (maxTimeDuration), the function will append the clusterStatus to +// the rollOut Clusters. // 2. If timeout is set to 0, the function append the clusterStatus to the timeOut clusters. 
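Not part of the patch: a self-contained sketch of the timeout rule spelled out in the comment above, using only the standard library. The timedOut helper is hypothetical; the vendored helper performs the equivalent check against RolloutClock and status.LastTransitionTime.

package main

import (
	"fmt"
	"time"
)

// timedOut reports whether "now" is at or past lastTransition+timeout.
// A timeout of "None" can be modelled as a very large duration; a timeout of 0 expires immediately.
func timedOut(lastTransition time.Time, timeout time.Duration, now time.Time) bool {
	return !now.Before(lastTransition.Add(timeout))
}

func main() {
	start := time.Now().Add(-5 * time.Minute) // cluster last changed state 5m ago

	fmt.Println(timedOut(start, 10*time.Minute, time.Now())) // false: still within the deadline, keep rolling out
	fmt.Println(timedOut(start, 0, time.Now()))              // true: zero timeout expires at once, counted as TimeOut
}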
-func determineRolloutStatus(status ClusterRolloutStatus, timeout time.Duration, rolloutClusters []ClusterRolloutStatus, timeoutClusters []ClusterRolloutStatus) ([]ClusterRolloutStatus, []ClusterRolloutStatus) { +func determineRolloutStatus( + status *ClusterRolloutStatus, + minSuccessTime time.Duration, + timeout time.Duration, + rolloutClusters []ClusterRolloutStatus, + timeoutClusters []ClusterRolloutStatus, +) ([]ClusterRolloutStatus, []ClusterRolloutStatus) { switch status.Status { case ToApply: - rolloutClusters = append(rolloutClusters, status) - case TimeOut, Succeeded, Skip: + rolloutClusters = append(rolloutClusters, *status) + case Succeeded: + // If the cluster succeeded but is still within the MinSuccessTime (i.e. "soak" time), + // still add it to the list of rolloutClusters + minSuccessTimeTime := getTimeOutTime(status.LastTransitionTime, minSuccessTime) + if RolloutClock.Now().Before(minSuccessTimeTime.Time) { + rolloutClusters = append(rolloutClusters, *status) + } + return rolloutClusters, timeoutClusters - default: // For progressing, failed status and any other unknown status. + case TimeOut, Skip: + return rolloutClusters, timeoutClusters + default: // For progressing, failed, or unknown status. timeOutTime := getTimeOutTime(status.LastTransitionTime, timeout) status.TimeOutTime = timeOutTime - // check if current time is before the timeout time if RolloutClock.Now().Before(timeOutTime.Time) { - rolloutClusters = append(rolloutClusters, status) + rolloutClusters = append(rolloutClusters, *status) } else { status.Status = TimeOut - timeoutClusters = append(timeoutClusters, status) + timeoutClusters = append(timeoutClusters, *status) } } return rolloutClusters, timeoutClusters } -// get the timeout time +// getTimeOutTime calculates the timeout time given a start time and duration, instantiating the +// RolloutClock if a start time isn't provided. func getTimeOutTime(startTime *metav1.Time, timeout time.Duration) *metav1.Time { var timeoutTime time.Time if startTime == nil { @@ -381,34 +490,62 @@ func getTimeOutTime(startTime *metav1.Time, timeout time.Duration) *metav1.Time return &metav1.Time{Time: timeoutTime} } -func calculateRolloutSize(maxConcurrency intstr.IntOrString, total int) (int, error) { - length := total +// calculateRolloutSize calculates the maximum portion from a total number of clusters by parsing a +// maximum threshold value that can be either a quantity or a percent, returning an error if the +// threshold can't be parsed to either of those. +func calculateRolloutSize(maxThreshold intstr.IntOrString, total int, defaultThreshold int) (int, error) { + length := defaultThreshold + + // Verify the format of the IntOrString value + err := parseRolloutSize(maxThreshold) + if err != nil { + return length, err + } - switch maxConcurrency.Type { + // Calculate the rollout size--errors are ignored because + // they were handled in parseRolloutSize() previously + switch maxThreshold.Type { case intstr.Int: - length = maxConcurrency.IntValue() + length = maxThreshold.IntValue() case intstr.String: - str := maxConcurrency.StrVal + str := maxThreshold.StrVal + f, _ := strconv.ParseFloat(str[:len(str)-1], 64) + length = int(math.Ceil(f / 100 * float64(total))) + } + + if length <= 0 || length > total { + length = defaultThreshold + } + + return length, nil +} + +// parseRolloutSize parses a maximum threshold value that can be either a quantity or a percent, +// returning an error if the threshold can't be parsed to either of those. 
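Not part of the patch: a standalone sketch of how a percentage threshold such as a MaxConcurrency or MaxFailures value resolves against a cluster total, mirroring the calculateRolloutSize logic above. resolveThreshold is a hypothetical name and error handling is collapsed for brevity.

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// resolveThreshold turns an integer or "<n>%" threshold into a cluster count,
// rounding percentages up and falling back to def when the result is out of range.
func resolveThreshold(threshold intstr.IntOrString, total, def int) int {
	size := def
	switch threshold.Type {
	case intstr.Int:
		size = threshold.IntValue()
	case intstr.String:
		s := threshold.StrVal
		if strings.HasSuffix(s, "%") {
			if f, err := strconv.ParseFloat(strings.TrimSuffix(s, "%"), 64); err == nil {
				size = int(math.Ceil(f / 100 * float64(total)))
			}
		}
	}
	if size <= 0 || size > total {
		size = def
	}
	return size
}

func main() {
	fmt.Println(resolveThreshold(intstr.FromString("50%"), 7, 7)) // 4: 50% of 7 rounds up
	fmt.Println(resolveThreshold(intstr.FromInt(3), 7, 7))        // 3: integers are used as-is
	fmt.Println(resolveThreshold(intstr.FromString("0%"), 7, 7))  // 7: out of range, falls back to the default
}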
+func parseRolloutSize(maxThreshold intstr.IntOrString) error { + + switch maxThreshold.Type { + case intstr.Int: + break + case intstr.String: + str := maxThreshold.StrVal if strings.HasSuffix(str, "%") { - f, err := strconv.ParseFloat(str[:len(str)-1], 64) + _, err := strconv.ParseFloat(str[:len(str)-1], 64) if err != nil { - return length, err + return err } - length = int(math.Ceil(f / 100 * float64(total))) } else { - return length, fmt.Errorf("%v invalid type: string is not a percentage", maxConcurrency) + return fmt.Errorf("'%s' is an invalid maximum threshold value: string is not a percentage", str) } default: - return length, fmt.Errorf("incorrect MaxConcurrency type %v", maxConcurrency.Type) + return fmt.Errorf("invalid maximum threshold type %+v", maxThreshold.Type) } - if length <= 0 || length > total { - length = total - } - - return length, nil + return nil } +// ParseTimeout will return the maximum possible duration given "None", an empty string, or an +// invalid duration, otherwise parsing and returning the duration provided. func parseTimeout(timeoutStr string) (time.Duration, error) { // Define the regex pattern to match the timeout string pattern := "^(([0-9])+[h|m|s])|None$" diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go index c9fa38155..4fbe6524a 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go +++ b/vendor/open-cluster-management.io/api/cluster/v1alpha1/types_rolloutstrategy.go @@ -1,53 +1,98 @@ package v1alpha1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) // +k8s:deepcopy-gen=true -// RolloutStrategy API used by workload applier APIs to define how the workload will be applied to the selected clusters by the Placement and DecisionStrategy. +// RolloutStrategy API used by workload applier APIs to define how the workload will be applied to +// the selected clusters by the Placement and DecisionStrategy. + +type RolloutType string const ( //All means apply the workload to all clusters in the decision groups at once. - All string = "All" + All RolloutType = "All" //Progressive means apply the workload to the selected clusters progressively per cluster. - Progressive string = "Progressive" + Progressive RolloutType = "Progressive" //ProgressivePerGroup means apply the workload to the selected clusters progressively per group. - ProgressivePerGroup string = "ProgressivePerGroup" + ProgressivePerGroup RolloutType = "ProgressivePerGroup" ) // Rollout strategy to apply workload to the selected clusters by Placement and DecisionStrategy. type RolloutStrategy struct { // Rollout strategy Types are All, Progressive and ProgressivePerGroup // 1) All means apply the workload to all clusters in the decision groups at once. - // 2) Progressive means apply the workload to the selected clusters progressively per cluster. The workload will not be applied to the next cluster unless one of the current applied clusters reach the successful state or timeout. - // 3) ProgressivePerGroup means apply the workload to decisionGroup clusters progressively per group. The workload will not be applied to the next decisionGroup unless all clusters in the current group reach the successful state or timeout. + // 2) Progressive means apply the workload to the selected clusters progressively per cluster. 
The + // workload will not be applied to the next cluster unless one of the currently applied clusters + // reaches the successful state and the MaxFailures threshold has not been breached. + // 3) ProgressivePerGroup means apply the workload to decisionGroup clusters progressively per + // group. The workload will not be applied to the next decisionGroup unless all clusters in the + // current group reach the successful state and the MaxFailures threshold has not been breached. + // +kubebuilder:validation:Enum=All;Progressive;ProgressivePerGroup // +kubebuilder:default:=All // +optional - Type string `json:"type,omitempty"` + Type RolloutType `json:"type,omitempty"` - // All define required fields for RolloutStrategy type All + // All defines required fields for RolloutStrategy type All // +optional All *RolloutAll `json:"all,omitempty"` - // Progressive define required fields for RolloutStrategy type Progressive + // Progressive defines required fields for RolloutStrategy type Progressive // +optional Progressive *RolloutProgressive `json:"progressive,omitempty"` - // ProgressivePerGroup define required fields for RolloutStrategy type ProgressivePerGroup + // ProgressivePerGroup defines required fields for RolloutStrategy type ProgressivePerGroup // +optional ProgressivePerGroup *RolloutProgressivePerGroup `json:"progressivePerGroup,omitempty"` } // Timeout to consider while applying the workload. -type Timeout struct { - // Timeout define how long workload applier controller will wait till workload reach successful state in the cluster. - // Timeout default value is None meaning the workload applier will not proceed apply workload to other clusters if did not reach the successful state. +type RolloutConfig struct { + // MinSuccessTime is a "soak" time. In other words, the minimum amount of time the workload + // applier controller will wait from the start of each rollout before proceeding (assuming a + // successful state has been reached and MaxFailures wasn't breached). + // MinSuccessTime is only considered for rollout types Progressive and ProgressivePerGroup. + // The default value is 0 meaning the workload applier proceeds immediately after a successful + // state is reached. + // MinSuccessTime must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s + // +kubebuilder:default:="0" + // +optional + MinSuccessTime metav1.Duration `json:"minSuccessTime,omitempty"` + // ProgressDeadline defines how long workload applier controller will wait for the workload to + // reach a successful state in the cluster. + // ProgressDeadline default value is "None", meaning the workload applier will wait for a + // successful state indefinitely. + // ProgressDeadline must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s + // +kubebuilder:validation:Pattern="^(([0-9])+[h|m|s])|None$" + // +kubebuilder:default:="None" + // +optional + ProgressDeadline string `json:"progressDeadline,omitempty"` + // MaxFailures is a percentage or number of clusters in the current rollout that can fail before + // proceeding to the next rollout. + // MaxFailures is only considered for rollout types Progressive and ProgressivePerGroup. For + // Progressive, this is considered over the total number of clusters. For ProgressivePerGroup, + // this is considered according to the size of the current group. For both Progressive and + // ProgressivePerGroup, the MaxFailures does not apply for MandatoryDecisionGroups, which tolerate + // no failures. + // Default is that no failures are tolerated.
+ // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" + // +kubebuilder:validation:XIntOrString + // +kubebuilder:default="0" + // +optional + MaxFailures intstr.IntOrString `json:"maxFailures,omitempty"` + // Timeout defines how long the workload applier controller will wait until the workload reaches a + // successful state in the cluster. + // Timeout default value is None meaning the workload applier will not proceed apply workload to + // other clusters if did not reach the successful state. // Timeout must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s + // + // Deprecated: Use ProgressDeadline instead. // +kubebuilder:validation:Pattern="^(([0-9])+[h|m|s])|None$" - // +kubebuilder:default:=None + // +kubebuilder:default:="None" // +optional Timeout string `json:"timeout,omitempty"` } @@ -55,19 +100,23 @@ type Timeout struct { // MandatoryDecisionGroup set the decision group name or group index. // GroupName is considered first to select the decisionGroups then GroupIndex. type MandatoryDecisionGroup struct { - // GroupName of the decision group should match the placementDecisions label value with label key cluster.open-cluster-management.io/decision-group-name + // GroupName of the decision group should match the placementDecisions label value with label key + // cluster.open-cluster-management.io/decision-group-name // +optional GroupName string `json:"groupName,omitempty"` - // GroupIndex of the decision group should match the placementDecisions label value with label key cluster.open-cluster-management.io/decision-group-index + // GroupIndex of the decision group should match the placementDecisions label value with label key + // cluster.open-cluster-management.io/decision-group-index // +optional GroupIndex int32 `json:"groupIndex,omitempty"` } // MandatoryDecisionGroups type MandatoryDecisionGroups struct { - // List of the decision groups names or indexes to apply the workload first and fail if workload did not reach successful state. - // GroupName or GroupIndex must match with the decisionGroups defined in the placement's decisionStrategy + // List of the decision groups names or indexes to apply the workload first and fail if workload + // did not reach successful state. + // GroupName or GroupIndex must match with the decisionGroups defined in the placement's + // decisionStrategy // +optional MandatoryDecisionGroups []MandatoryDecisionGroup `json:"mandatoryDecisionGroups,omitempty"` } @@ -75,29 +124,31 @@ type MandatoryDecisionGroups struct { // RolloutAll is a RolloutStrategy Type type RolloutAll struct { // +optional - Timeout `json:",inline"` + RolloutConfig `json:",inline"` } // RolloutProgressivePerGroup is a RolloutStrategy Type type RolloutProgressivePerGroup struct { // +optional - MandatoryDecisionGroups `json:",inline"` + RolloutConfig `json:",inline"` // +optional - Timeout `json:",inline"` + MandatoryDecisionGroups `json:",inline"` } // RolloutProgressive is a RolloutStrategy Type type RolloutProgressive struct { + // +optional + RolloutConfig `json:",inline"` + // +optional MandatoryDecisionGroups `json:",inline"` - // MaxConcurrency is the max number of clusters to deploy workload concurrently. The default value for MaxConcurrency is determined from the clustersPerDecisionGroup defined in the placement->DecisionStrategy. + // MaxConcurrency is the max number of clusters to deploy workload concurrently. 
The default value + // for MaxConcurrency is determined from the clustersPerDecisionGroup defined in the + // placement->DecisionStrategy. // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" // +kubebuilder:validation:XIntOrString // +optional MaxConcurrency intstr.IntOrString `json:"maxConcurrency,omitempty"` - - // +optional - Timeout `json:",inline"` } diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go index 2be1d2833..5223c00b1 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go @@ -249,7 +249,7 @@ func (in *MandatoryDecisionGroups) DeepCopy() *MandatoryDecisionGroups { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RolloutAll) DeepCopyInto(out *RolloutAll) { *out = *in - out.Timeout = in.Timeout + out.RolloutConfig = in.RolloutConfig } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutAll. @@ -262,12 +262,29 @@ func (in *RolloutAll) DeepCopy() *RolloutAll { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutConfig) DeepCopyInto(out *RolloutConfig) { + *out = *in + out.MinSuccessTime = in.MinSuccessTime + out.MaxFailures = in.MaxFailures +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutConfig. +func (in *RolloutConfig) DeepCopy() *RolloutConfig { + if in == nil { + return nil + } + out := new(RolloutConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RolloutProgressive) DeepCopyInto(out *RolloutProgressive) { *out = *in + out.RolloutConfig = in.RolloutConfig in.MandatoryDecisionGroups.DeepCopyInto(&out.MandatoryDecisionGroups) out.MaxConcurrency = in.MaxConcurrency - out.Timeout = in.Timeout } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutProgressive. @@ -283,8 +300,8 @@ func (in *RolloutProgressive) DeepCopy() *RolloutProgressive { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RolloutProgressivePerGroup) DeepCopyInto(out *RolloutProgressivePerGroup) { *out = *in + out.RolloutConfig = in.RolloutConfig in.MandatoryDecisionGroups.DeepCopyInto(&out.MandatoryDecisionGroups) - out.Timeout = in.Timeout } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutProgressivePerGroup. @@ -362,18 +379,3 @@ func (in *RolloutStrategy) DeepCopy() *RolloutStrategy { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Timeout) DeepCopyInto(out *Timeout) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timeout. 
-func (in *Timeout) DeepCopy() *Timeout { - if in == nil { - return nil - } - out := new(Timeout) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go index 4cadc7a2e..79766e058 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.swagger_doc_generated.go @@ -105,6 +105,18 @@ func (RolloutAll) SwaggerDoc() map[string]string { return map_RolloutAll } +var map_RolloutConfig = map[string]string{ + "": "Timeout to consider while applying the workload.", + "minSuccessTime": "MinSuccessTime is a \"soak\" time. In other words, the minimum amount of time the workload applier controller will wait from the start of each rollout before proceeding (assuming a successful state has been reached and MaxFailures wasn't breached). MinSuccessTime is only considered for rollout types Progressive and ProgressivePerGroup. The default value is 0 meaning the workload applier proceeds immediately after a successful state is reached. MinSuccessTime must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s", + "progressDeadline": "ProgressDeadline defines how long workload applier controller will wait for the workload to reach a successful state in the cluster. ProgressDeadline default value is \"None\", meaning the workload applier will wait for a successful state indefinitely. ProgressDeadline must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s", + "maxFailures": "MaxFailures is a percentage or number of clusters in the current rollout that can fail before proceeding to the next rollout. MaxFailures is only considered for rollout types Progressive and ProgressivePerGroup. For Progressive, this is considered over the total number of clusters. For ProgressivePerGroup, this is considered according to the size of the current group. For both Progressive and ProgressivePerGroup, the MaxFailures does not apply for MandatoryDecisionGroups, which tolerate no failures. Default is that no failures are tolerated.", + "timeout": "Timeout defines how long the workload applier controller will wait until the workload reaches a successful state in the cluster. Timeout default value is None meaning the workload applier will not proceed apply workload to other clusters if did not reach the successful state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s\n\nDeprecated: Use ProgressDeadline instead.", +} + +func (RolloutConfig) SwaggerDoc() map[string]string { + return map_RolloutConfig +} + var map_RolloutProgressive = map[string]string{ "": "RolloutProgressive is a RolloutStrategy Type", "maxConcurrency": "MaxConcurrency is the max number of clusters to deploy workload concurrently. The default value for MaxConcurrency is determined from the clustersPerDecisionGroup defined in the placement->DecisionStrategy.", @@ -124,23 +136,13 @@ func (RolloutProgressivePerGroup) SwaggerDoc() map[string]string { var map_RolloutStrategy = map[string]string{ "": "Rollout strategy to apply workload to the selected clusters by Placement and DecisionStrategy.", - "type": "Rollout strategy Types are All, Progressive and ProgressivePerGroup 1) All means apply the workload to all clusters in the decision groups at once. 
2) Progressive means apply the workload to the selected clusters progressively per cluster. The workload will not be applied to the next cluster unless one of the current applied clusters reach the successful state or timeout. 3) ProgressivePerGroup means apply the workload to decisionGroup clusters progressively per group. The workload will not be applied to the next decisionGroup unless all clusters in the current group reach the successful state or timeout.", - "all": "All define required fields for RolloutStrategy type All", - "progressive": "Progressive define required fields for RolloutStrategy type Progressive", - "progressivePerGroup": "ProgressivePerGroup define required fields for RolloutStrategy type ProgressivePerGroup", + "all": "All defines required fields for RolloutStrategy type All", + "progressive": "Progressive defines required fields for RolloutStrategy type Progressive", + "progressivePerGroup": "ProgressivePerGroup defines required fields for RolloutStrategy type ProgressivePerGroup", } func (RolloutStrategy) SwaggerDoc() map[string]string { return map_RolloutStrategy } -var map_Timeout = map[string]string{ - "": "Timeout to consider while applying the workload.", - "timeout": "Timeout define how long workload applier controller will wait till workload reach successful state in the cluster. Timeout default value is None meaning the workload applier will not proceed apply workload to other clusters if did not reach the successful state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m , 360s", -} - -func (Timeout) SwaggerDoc() map[string]string { - return map_Timeout -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml b/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml index ecd82e555..c5a083042 100644 --- a/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml +++ b/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml @@ -328,29 +328,73 @@ spec: rolloutStrategy: default: all: - timeout: None + progressDeadline: None type: All description: Rollout strategy to apply workload to the selected clusters by Placement and DecisionStrategy. properties: all: - description: All define required fields for RolloutStrategy + description: All defines required fields for RolloutStrategy type All properties: + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number of + clusters in the current rollout that can fail before + proceeding to the next rollout. MaxFailures is only + considered for rollout types Progressive and ProgressivePerGroup. + For Progressive, this is considered over the total + number of clusters. For ProgressivePerGroup, this + is considered according to the size of the current + group. For both Progressive and ProgressivePerGroup, + the MaxFailures does not apply for MandatoryDecisionGroups, + which tolerate no failures. Default is that no failures + are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. 
In other + words, the minimum amount of time the workload applier + controller will wait from the start of each rollout + before proceeding (assuming a successful state has + been reached and MaxFailures wasn't breached). MinSuccessTime + is only considered for rollout types Progressive and + ProgressivePerGroup. The default value is 0 meaning + the workload applier proceeds immediately after a + successful state is reached. MinSuccessTime must be + defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h + , 90m , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload to reach + a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload applier + will wait for a successful state indefinitely. ProgressDeadline + must be defined in [0-9h]|[0-9m]|[0-9s] format examples; + 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is None - meaning the workload applier will not proceed apply - workload to other clusters if did not reach the successful - state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] - format examples; 2h , 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload reaches + a successful state in the cluster. Timeout default + value is None meaning the workload applier will not + proceed apply workload to other clusters if did not + reach the successful state. Timeout must be defined + in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s \n Deprecated: Use ProgressDeadline instead." pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object progressive: - description: Progressive define required fields for RolloutStrategy + description: Progressive defines required fields for RolloutStrategy type Progressive properties: mandatoryDecisionGroups: @@ -387,20 +431,64 @@ spec: defined in the placement->DecisionStrategy. pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ x-kubernetes-int-or-string: true + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number of + clusters in the current rollout that can fail before + proceeding to the next rollout. MaxFailures is only + considered for rollout types Progressive and ProgressivePerGroup. + For Progressive, this is considered over the total + number of clusters. For ProgressivePerGroup, this + is considered according to the size of the current + group. For both Progressive and ProgressivePerGroup, + the MaxFailures does not apply for MandatoryDecisionGroups, + which tolerate no failures. Default is that no failures + are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. In other + words, the minimum amount of time the workload applier + controller will wait from the start of each rollout + before proceeding (assuming a successful state has + been reached and MaxFailures wasn't breached). MinSuccessTime + is only considered for rollout types Progressive and + ProgressivePerGroup. The default value is 0 meaning + the workload applier proceeds immediately after a + successful state is reached. 
MinSuccessTime must be + defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h + , 90m , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload to reach + a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload applier + will wait for a successful state indefinitely. ProgressDeadline + must be defined in [0-9h]|[0-9m]|[0-9s] format examples; + 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is None - meaning the workload applier will not proceed apply - workload to other clusters if did not reach the successful - state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] - format examples; 2h , 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload reaches + a successful state in the cluster. Timeout default + value is None meaning the workload applier will not + proceed apply workload to other clusters if did not + reach the successful state. Timeout must be defined + in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s \n Deprecated: Use ProgressDeadline instead." pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object progressivePerGroup: - description: ProgressivePerGroup define required fields + description: ProgressivePerGroup defines required fields for RolloutStrategy type ProgressivePerGroup properties: mandatoryDecisionGroups: @@ -427,31 +515,64 @@ spec: type: string type: object type: array + maxFailures: + anyOf: + - type: integer + - type: string + default: "0" + description: MaxFailures is a percentage or number of + clusters in the current rollout that can fail before + proceeding to the next rollout. MaxFailures is only + considered for rollout types Progressive and ProgressivePerGroup. + For Progressive, this is considered over the total + number of clusters. For ProgressivePerGroup, this + is considered according to the size of the current + group. For both Progressive and ProgressivePerGroup, + the MaxFailures does not apply for MandatoryDecisionGroups, + which tolerate no failures. Default is that no failures + are tolerated. + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + x-kubernetes-int-or-string: true + minSuccessTime: + default: "0" + description: MinSuccessTime is a "soak" time. In other + words, the minimum amount of time the workload applier + controller will wait from the start of each rollout + before proceeding (assuming a successful state has + been reached and MaxFailures wasn't breached). MinSuccessTime + is only considered for rollout types Progressive and + ProgressivePerGroup. The default value is 0 meaning + the workload applier proceeds immediately after a + successful state is reached. MinSuccessTime must be + defined in [0-9h]|[0-9m]|[0-9s] format examples; 2h + , 90m , 360s + type: string + progressDeadline: + default: None + description: ProgressDeadline defines how long workload + applier controller will wait for the workload to reach + a successful state in the cluster. ProgressDeadline + default value is "None", meaning the workload applier + will wait for a successful state indefinitely. 
ProgressDeadline + must be defined in [0-9h]|[0-9m]|[0-9s] format examples; + 2h , 90m , 360s + pattern: ^(([0-9])+[h|m|s])|None$ + type: string timeout: default: None - description: Timeout define how long workload applier - controller will wait till workload reach successful - state in the cluster. Timeout default value is None - meaning the workload applier will not proceed apply - workload to other clusters if did not reach the successful - state. Timeout must be defined in [0-9h]|[0-9m]|[0-9s] - format examples; 2h , 90m , 360s + description: "Timeout defines how long the workload + applier controller will wait until the workload reaches + a successful state in the cluster. Timeout default + value is None meaning the workload applier will not + proceed apply workload to other clusters if did not + reach the successful state. Timeout must be defined + in [0-9h]|[0-9m]|[0-9s] format examples; 2h , 90m + , 360s \n Deprecated: Use ProgressDeadline instead." pattern: ^(([0-9])+[h|m|s])|None$ type: string type: object type: default: All - description: Rollout strategy Types are All, Progressive - and ProgressivePerGroup 1) All means apply the workload - to all clusters in the decision groups at once. 2) Progressive - means apply the workload to the selected clusters progressively - per cluster. The workload will not be applied to the next - cluster unless one of the current applied clusters reach - the successful state or timeout. 3) ProgressivePerGroup - means apply the workload to decisionGroup clusters progressively - per group. The workload will not be applied to the next - decisionGroup unless all clusters in the current group - reach the successful state or timeout. enum: - All - Progressive diff --git a/vendor/open-cluster-management.io/api/work/v1alpha1/types_manifestworkreplicaset.go b/vendor/open-cluster-management.io/api/work/v1alpha1/types_manifestworkreplicaset.go index b68e00897..7c693d318 100644 --- a/vendor/open-cluster-management.io/api/work/v1alpha1/types_manifestworkreplicaset.go +++ b/vendor/open-cluster-management.io/api/work/v1alpha1/types_manifestworkreplicaset.go @@ -92,7 +92,7 @@ type LocalPlacementReference struct { Name string `json:"name"` // +optional - // +kubebuilder:default={type: All, all: {timeout: None}} + // +kubebuilder:default={type: All, all: {progressDeadline: None}} RolloutStrategy cluster.RolloutStrategy `json:"rolloutStrategy"` }
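Not part of the patch: an illustrative snippet of how a consumer of this API might populate the new RolloutConfig fields together with MaxConcurrency once this change lands. The field and constant names come from types_rolloutstrategy.go above; the concrete durations and percentages are arbitrary examples.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
)

func main() {
	strategy := clusterv1alpha1.RolloutStrategy{
		Type: clusterv1alpha1.Progressive,
		Progressive: &clusterv1alpha1.RolloutProgressive{
			RolloutConfig: clusterv1alpha1.RolloutConfig{
				MinSuccessTime:   metav1.Duration{Duration: 5 * time.Minute}, // soak each batch for 5 minutes
				ProgressDeadline: "10m",                                      // give each cluster 10 minutes to succeed
				MaxFailures:      intstr.FromString("10%"),                   // tolerate up to 10% failed clusters
			},
			MaxConcurrency: intstr.FromString("25%"), // roll out to 25% of the clusters at a time
		},
	}
	fmt.Printf("%+v\n", strategy)
}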