diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index bd9c6982c..d86cf2171 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -18,9 +18,9 @@ jobs:
           cache: false
           go-version-file: go.mod
       - run: sudo apt update && sudo apt install -y libpfm4 libpfm4-dev
-      - uses: golangci/golangci-lint-action@v5
+      - uses: golangci/golangci-lint-action@v5.3.0
         with:
-          version: v1.47.3
+          version: v1.55.2
 
   unit-tests:
     strategy:
diff --git a/.github/workflows/e2e-k8s-1.22.yaml b/.github/workflows/e2e-k8s-1.22.yaml
index 84ef77d20..a3e0ceeb5 100644
--- a/.github/workflows/e2e-k8s-1.22.yaml
+++ b/.github/workflows/e2e-k8s-1.22.yaml
@@ -10,7 +10,7 @@ on:
 
 env:
   # Common versions
-  GO_VERSION: '1.19'
+  GO_VERSION: '1.20'
   KIND_ACTION_VERSION: 'v1.5.0'
   KIND_VERSION: 'v0.20.0'
   KIND_IMAGE: 'kindest/node:v1.22.17'
diff --git a/.github/workflows/e2e-k8s-1.24.yaml b/.github/workflows/e2e-k8s-1.24.yaml
index 30a3cc573..4f11a0210 100644
--- a/.github/workflows/e2e-k8s-1.24.yaml
+++ b/.github/workflows/e2e-k8s-1.24.yaml
@@ -10,7 +10,7 @@ on:
 
 env:
   # Common versions
-  GO_VERSION: '1.19'
+  GO_VERSION: '1.20'
   KIND_ACTION_VERSION: 'v1.5.0'
   KIND_VERSION: 'v0.20.0'
   KIND_IMAGE: 'kindest/node:v1.24.15'
diff --git a/.github/workflows/e2e-k8s-1.28.yaml b/.github/workflows/e2e-k8s-1.28.yaml
new file mode 100644
index 000000000..eb0559990
--- /dev/null
+++ b/.github/workflows/e2e-k8s-1.28.yaml
@@ -0,0 +1,106 @@
+name: E2E-K8S-1.28
+
+on:
+  push:
+    branches:
+      - main
+      - release-*
+  pull_request: {}
+  workflow_dispatch: {}
+
+env:
+  # Common versions
+  GO_VERSION: '1.20'
+  KIND_ACTION_VERSION: 'v1.5.0'
+  KIND_VERSION: 'v0.22.0'
+  KIND_IMAGE: 'kindest/node:v1.28.7'
+  KIND_CLUSTER_NAME: 'ci-testing'
+  COMPONENT_NS: "koordinator-system"
+
+jobs:
+
+  slo-controller:
+    continue-on-error: true
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: true
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - name: Setup Kind Cluster
+        uses: helm/kind-action@v1.9.0
+        with:
+          node_image: ${{ env.KIND_IMAGE }}
+          cluster_name: ${{ env.KIND_CLUSTER_NAME }}
+          config: ./test/kind-conf.yaml
+          version: ${{ env.KIND_VERSION }}
+      - name: Build image
+        run: |
+          export MANAGER_IMAGE="koordinator-sh/koord-manager:e2e-${GITHUB_RUN_ID}"
+          docker build --pull . -t ${MANAGER_IMAGE} -f docker/koord-manager.dockerfile
+          export KOORDLET_IMAGE="koordinator-sh/koordlet:e2e-${GITHUB_RUN_ID}"
+          docker build --pull . -t ${KOORDLET_IMAGE} -f docker/koordlet.dockerfile
+          kind load docker-image --name=${KIND_CLUSTER_NAME} ${MANAGER_IMAGE} || { echo >&2 "kind not installed or error loading image: ${MANAGER_IMAGE}"; exit 1; }
+          kind load docker-image --name=${KIND_CLUSTER_NAME} ${KOORDLET_IMAGE} || { echo >&2 "kind not installed or error loading image: ${KOORDLET_IMAGE}"; exit 1; }
+      - name: Check host environment
+        run: |
+          set -ex
+          kubectl version --short
+          kubectl get pods -A
+          kubectl get nodes -o yaml
+          tree -L 2 /sys/
+          tree -L 2 /sys/fs/cgroup
+          cat /proc/cpuinfo
+      - name: Install Koordinator
+        run: |
+          set -ex
+          kubectl cluster-info
+          MANAGER_IMG=koordinator-sh/koord-manager:e2e-${GITHUB_RUN_ID} KOORDLET_IMG=koordinator-sh/koordlet:e2e-${GITHUB_RUN_ID} ./hack/deploy_kind.sh
+          NODES=$(kubectl get node | wc -l)
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n ${COMPONENT_NS} | grep "koord-manager\|koordlet" | grep '1/1' | wc -l)
+            set -e
+            if [ "$PODS" -ge "$NODES" ]; then
+              break
+            fi
+            sleep 6
+          done
+          set +e
+          PODS=$(kubectl get pod -n ${COMPONENT_NS} | grep "koord-manager\|koordlet" | grep '1/1' | wc -l)
+          kubectl get pod -A
+          kubectl get node -o yaml
+          kubectl get all -n ${COMPONENT_NS} -o wide
+          kubectl get pod -n ${COMPONENT_NS} --no-headers | grep koord-manager | head -n 1 | awk '{print $1}' | xargs kubectl logs -n ${COMPONENT_NS} --tail=100
+          kubectl get pod -n ${COMPONENT_NS} --no-headers | grep koord-scheduler | awk '{print $1}' | xargs kubectl logs -n ${COMPONENT_NS} --tail=100
+          kubectl get pod -n ${COMPONENT_NS} --no-headers | grep koordlet | head -n 1 | awk '{print $1}' | xargs -L 1 kubectl logs -n ${COMPONENT_NS}
+          kubectl get pod -n ${COMPONENT_NS} -o wide
+          set -e
+          if [ "$PODS" -ge "$NODES" ]; then
+            echo "Wait for koord-manager and koordlet ready successfully"
+          else
+            echo "Timeout to wait for koord-manager and koordlet ready"
+            exit 1
+          fi
+      - name: Run E2E Tests
+        run: |
+          export KUBECONFIG=/home/runner/.kube/config
+          make ginkgo
+          set +e
+          EXTRA_ARGS="-koordinator-component-namespace=${COMPONENT_NS} -allowed-not-ready-nodes=1 -system-pods-startup-timeout=10s -e2e-verify-service-account=false"
+          ./bin/ginkgo -timeout 60m -v --focus='slo-controller' test/e2e -- ${EXTRA_ARGS}
+          retVal=$?
+          restartCount=$(kubectl get pod -n ${COMPONENT_NS} -l koord-app=koord-manager --no-headers | head -n 1 | awk '{print $4}')
+          if [ "${restartCount}" -eq "0" ];then
+            echo "koord-manager has not restarted"
+          else
+            kubectl get pod -n ${COMPONENT_NS} -l koord-app=koord-manager --no-headers
+            echo "koord-manager has restarted, abort!!!"
+            kubectl get pod -n ${COMPONENT_NS} --no-headers -l koord-app=koord-manager | head -n 1 | awk '{print $1}' | xargs kubectl logs -p -n ${COMPONENT_NS}
+            exit 1
+          fi
+          exit $retVal
diff --git a/.github/workflows/e2e-k8s-latest.yaml b/.github/workflows/e2e-k8s-latest.yaml
index 01ee22c46..0d990fab1 100644
--- a/.github/workflows/e2e-k8s-latest.yaml
+++ b/.github/workflows/e2e-k8s-latest.yaml
@@ -10,7 +10,7 @@ on:
 
 env:
   # Common versions
-  GO_VERSION: '1.19'
+  GO_VERSION: '1.20'
   KIND_ACTION_VERSION: 'v1.5.0'
   KIND_VERSION: 'v0.20.0'
   KIND_CLUSTER_NAME: 'ci-testing'
diff --git a/.golangci.yml b/.golangci.yml
index 529bb76d2..29fe37a03 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -58,7 +58,7 @@ linters:
   fast: false
   disable-all: true
   enable:
-    - deadcode
+#    - unused
     - gofmt
     - govet
     - goimports
diff --git a/Makefile b/Makefile
index efee1fec2..cb439829a 100644
--- a/Makefile
+++ b/Makefile
@@ -15,7 +15,7 @@ KOORD_SCHEDULER_IMG ?= "${REG}/${REG_NS}/koord-scheduler:${GIT_BRANCH}-${GIT_COM
 KOORD_DESCHEDULER_IMG ?= "${REG}/${REG_NS}/koord-descheduler:${GIT_BRANCH}-${GIT_COMMIT_ID}"
 
 # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
-ENVTEST_K8S_VERSION = 1.22
+ENVTEST_K8S_VERSION = 1.28
 AGENT_MODE ?= hostMode
 
 # Set license header files.
@@ -65,7 +65,6 @@ help: ## Display this help.
 .PHONY: manifests
 manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
 	$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
-	@hack/fix_crd_plural.sh
 
 .PHONY: generate
 generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
@@ -217,8 +216,8 @@ HACK_DIR ?= $(PWD)/hack
 
 ## Tool Versions
 KUSTOMIZE_VERSION ?= v3.8.7
-CONTROLLER_TOOLS_VERSION ?= v0.9.0
-GOLANGCILINT_VERSION ?= v1.47.3
+CONTROLLER_TOOLS_VERSION ?= v0.14.0
+GOLANGCILINT_VERSION ?= v1.55.2
 GINKGO_VERSION ?= v1.16.4
 
 KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
diff --git a/apis/analysis/v1alpha1/zz_generated.deepcopy.go b/apis/analysis/v1alpha1/zz_generated.deepcopy.go
index 24dd7caf9..d17a0da13 100644
--- a/apis/analysis/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/analysis/v1alpha1/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
 //go:build !ignore_autogenerated
-// +build !ignore_autogenerated
 
 /*
 Copyright 2022 The Koordinator Authors.
diff --git a/apis/config/v1alpha1/zz_generated.deepcopy.go b/apis/config/v1alpha1/zz_generated.deepcopy.go
index 954e864b0..ce9af92cd 100644
--- a/apis/config/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/config/v1alpha1/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
 //go:build !ignore_autogenerated
-// +build !ignore_autogenerated
 
 /*
 Copyright 2022 The Koordinator Authors.
diff --git a/apis/configuration/zz_generated.deepcopy.go b/apis/configuration/zz_generated.deepcopy.go
index 946cc8563..6de05c540 100644
--- a/apis/configuration/zz_generated.deepcopy.go
+++ b/apis/configuration/zz_generated.deepcopy.go
@@ -1,5 +1,4 @@
 //go:build !ignore_autogenerated
-// +build !ignore_autogenerated
 
 /*
 Copyright 2022 The Koordinator Authors.
diff --git a/apis/extension/elastic_quota.go b/apis/extension/elastic_quota.go index 16e76ff12..e03d7f353 100644 --- a/apis/extension/elastic_quota.go +++ b/apis/extension/elastic_quota.go @@ -22,7 +22,8 @@ import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/apiserver/pkg/quota/v1" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) // RootQuotaName means quotaTree's root\head. diff --git a/apis/quota/v1alpha1/zz_generated.deepcopy.go b/apis/quota/v1alpha1/zz_generated.deepcopy.go index d3e382561..da6f6b131 100644 --- a/apis/quota/v1alpha1/zz_generated.deepcopy.go +++ b/apis/quota/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2022 The Koordinator Authors. diff --git a/apis/scheduling/v1alpha1/zz_generated.deepcopy.go b/apis/scheduling/v1alpha1/zz_generated.deepcopy.go index 877e86621..978f9d640 100644 --- a/apis/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/apis/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2022 The Koordinator Authors. diff --git a/apis/slo/v1alpha1/zz_generated.deepcopy.go b/apis/slo/v1alpha1/zz_generated.deepcopy.go index 575209cac..8eba59ec6 100644 --- a/apis/slo/v1alpha1/zz_generated.deepcopy.go +++ b/apis/slo/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2022 The Koordinator Authors. diff --git a/apis/thirdparty/scheduler-plugins/LICENSE b/apis/thirdparty/scheduler-plugins/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/register.go b/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/register.go new file mode 100644 index 000000000..24ec5eeb4 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/register.go @@ -0,0 +1,22 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scheduling + +// GroupName is the group name used in this package +const ( + GroupName = "scheduling.sigs.k8s.io" +) diff --git a/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/doc.go b/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/doc.go new file mode 100644 index 000000000..cef1f8066 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=scheduling.sigs.k8s.io + +// Package v1alpha1 is the v1alpha1 version of the API. +package v1alpha1 diff --git a/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/register.go b/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/register.go new file mode 100644 index 000000000..318f3bf4d --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/register.go @@ -0,0 +1,57 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: scheduling.GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder initializes a scheme builder + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ElasticQuota{}, + &ElasticQuotaList{}, + &PodGroup{}, + &PodGroupList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/types.go b/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/types.go new file mode 100644 index 000000000..75002220a --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/types.go @@ -0,0 +1,195 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling" +) + +// +genclient +// +genclient:noStatus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubernetes-sigs/scheduler-plugins/pull/52" + +// ElasticQuota sets elastic quota restrictions per namespace +type ElasticQuota struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // ElasticQuotaSpec defines the Min and Max for Quota. + // +optional + Spec ElasticQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // ElasticQuotaStatus defines the observed use. + // +optional + Status ElasticQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ElasticQuotaSpec defines the Min and Max for Quota. +type ElasticQuotaSpec struct { + // Min is the set of desired guaranteed limits for each named resource. + // +optional + Min v1.ResourceList `json:"min,omitempty" protobuf:"bytes,1,rep,name=min, casttype=ResourceList,castkey=ResourceName"` + + // Max is the set of desired max limits for each named resource. The usage of max is based on the resource configurations of + // successfully scheduled pods. + // +optional + Max v1.ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max, casttype=ResourceList,castkey=ResourceName"` +} + +// ElasticQuotaStatus defines the observed use. +type ElasticQuotaStatus struct { + // Used is the current observed total usage of the resource in the namespace. + // +optional + Used v1.ResourceList `json:"used,omitempty" protobuf:"bytes,1,rep,name=used,casttype=ResourceList,castkey=ResourceName"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ElasticQuotaList is a list of ElasticQuota items. +type ElasticQuotaList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ElasticQuota objects. 
+ Items []ElasticQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PodGroupPhase is the phase of a pod group at the current time. +type PodGroupPhase string + +// These are the valid phase of podGroups. +const ( + // PodGroupPending means the pod group has been accepted by the system, but scheduler can not allocate + // enough resources to it. + PodGroupPending PodGroupPhase = "Pending" + + // PodGroupRunning means `spec.minMember` pods of PodGroups has been in running phase. + PodGroupRunning PodGroupPhase = "Running" + + // PodGroupPreScheduling means all of pods has been are waiting to be scheduled, enqueue waitingPod + PodGroupPreScheduling PodGroupPhase = "PreScheduling" + + // PodGroupScheduling means some of pods has been scheduling in running phase but have not reach the `spec. + // minMember` pods of PodGroups. + PodGroupScheduling PodGroupPhase = "Scheduling" + + // PodGroupScheduled means `spec.minMember` pods of PodGroups have been scheduled finished and pods have been in running + // phase. + PodGroupScheduled PodGroupPhase = "Scheduled" + + // PodGroupUnknown means part of `spec.minMember` pods are running but the other part can not + // be scheduled, e.g. not enough resource; scheduler will wait for related controller to recover it. + PodGroupUnknown PodGroupPhase = "Unknown" + + // PodGroupFinished means all of `spec.minMember` pods are successfully. + PodGroupFinished PodGroupPhase = "Finished" + + // PodGroupFailed means at least one of `spec.minMember` pods is failed. + PodGroupFailed PodGroupPhase = "Failed" + + // PodGroupLabel is the default label of coscheduling + PodGroupLabel = "pod-group." + scheduling.GroupName +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubernetes-sigs/scheduler-plugins/pull/52" + +// PodGroup is a collection of Pod; used for batch workload. +type PodGroup struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Specification of the desired behavior of the pod group. + // +optional + Spec PodGroupSpec `json:"spec,omitempty"` + + // Status represents the current information about a pod group. + // This data may not be up to date. + // +optional + Status PodGroupStatus `json:"status,omitempty"` +} + +// PodGroupSpec represents the template of a pod group. +type PodGroupSpec struct { + // MinMember defines the minimal number of members/tasks to run the pod group; + // if there's not enough resources to start all tasks, the scheduler + // will not start anyone. + MinMember int32 `json:"minMember,omitempty"` + + // MinResources defines the minimal resource of members/tasks to run the pod group; + // if there's not enough resources to start all tasks, the scheduler + // will not start anyone. + MinResources *v1.ResourceList `json:"minResources,omitempty"` + + // ScheduleTimeoutSeconds defines the maximal time of members/tasks to wait before run the pod group; + ScheduleTimeoutSeconds *int32 `json:"scheduleTimeoutSeconds,omitempty"` +} + +// PodGroupStatus represents the current state of a pod group. +type PodGroupStatus struct { + // Current phase of PodGroup. + Phase PodGroupPhase `json:"phase,omitempty"` + + // OccupiedBy marks the workload (e.g., deployment, statefulset) UID that occupy the podgroup. + // It is empty if not initialized. 
+ OccupiedBy string `json:"occupiedBy,omitempty"` + + // The number of actively running pods. + // +optional + Scheduled int32 `json:"scheduled,omitempty"` + + // The number of actively running pods. + // +optional + Running int32 `json:"running,omitempty"` + + // The number of pods which reached phase Succeeded. + // +optional + Succeeded int32 `json:"succeeded,omitempty"` + + // The number of pods which reached phase Failed. + // +optional + Failed int32 `json:"failed,omitempty"` + + // ScheduleStartTime of the group + ScheduleStartTime metav1.Time `json:"scheduleStartTime,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodGroupList is a collection of pod groups. +type PodGroupList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + // Items is the list of PodGroup + Items []PodGroup `json:"items"` +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go b/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..fbae959ed --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,241 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticQuota) DeepCopyInto(out *ElasticQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticQuota. +func (in *ElasticQuota) DeepCopy() *ElasticQuota { + if in == nil { + return nil + } + out := new(ElasticQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ElasticQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticQuotaList) DeepCopyInto(out *ElasticQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ElasticQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticQuotaList. 
+func (in *ElasticQuotaList) DeepCopy() *ElasticQuotaList { + if in == nil { + return nil + } + out := new(ElasticQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ElasticQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticQuotaSpec) DeepCopyInto(out *ElasticQuotaSpec) { + *out = *in + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticQuotaSpec. +func (in *ElasticQuotaSpec) DeepCopy() *ElasticQuotaSpec { + if in == nil { + return nil + } + out := new(ElasticQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticQuotaStatus) DeepCopyInto(out *ElasticQuotaStatus) { + *out = *in + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticQuotaStatus. +func (in *ElasticQuotaStatus) DeepCopy() *ElasticQuotaStatus { + if in == nil { + return nil + } + out := new(ElasticQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodGroup) DeepCopyInto(out *PodGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroup. +func (in *PodGroup) DeepCopy() *PodGroup { + if in == nil { + return nil + } + out := new(PodGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodGroupList) DeepCopyInto(out *PodGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupList. +func (in *PodGroupList) DeepCopy() *PodGroupList { + if in == nil { + return nil + } + out := new(PodGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodGroupSpec) DeepCopyInto(out *PodGroupSpec) { + *out = *in + if in.MinResources != nil { + in, out := &in.MinResources, &out.MinResources + *out = new(v1.ResourceList) + if **in != nil { + in, out := *in, *out + *out = make(map[v1.ResourceName]resource.Quantity, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + } + if in.ScheduleTimeoutSeconds != nil { + in, out := &in.ScheduleTimeoutSeconds, &out.ScheduleTimeoutSeconds + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupSpec. +func (in *PodGroupSpec) DeepCopy() *PodGroupSpec { + if in == nil { + return nil + } + out := new(PodGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodGroupStatus) DeepCopyInto(out *PodGroupStatus) { + *out = *in + in.ScheduleStartTime.DeepCopyInto(&out.ScheduleStartTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupStatus. +func (in *PodGroupStatus) DeepCopy() *PodGroupStatus { + if in == nil { + return nil + } + out := new(PodGroupStatus) + in.DeepCopyInto(out) + return out +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/clientset.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/clientset.go new file mode 100644 index 000000000..8c6c11430 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/clientset.go @@ -0,0 +1,97 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + schedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. 
+type Clientset struct { + *discovery.DiscoveryClient + schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client +} + +// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client +func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { + return c.schedulingV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/doc.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/doc.go new file mode 100644 index 000000000..bff37e9c1 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake/clientset_generated.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 000000000..4fd115c15 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,85 @@ +/* +Copyright 2022 The Koordinator Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + schedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1" + fakeschedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client +func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { + return &fakeschedulingv1alpha1.FakeSchedulingV1alpha1{Fake: &c.Fake} +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake/doc.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake/doc.go new file mode 100644 index 000000000..8664c03b8 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2022 The Koordinator Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake/register.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake/register.go new file mode 100644 index 000000000..09b2cddb7 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + schedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + schedulingv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/scheme/doc.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..be5ce07e6 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/scheme/register.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..64088a898 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + schedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + schedulingv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/doc.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/doc.go new file mode 100644 index 000000000..28802f366 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/elasticquota.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/elasticquota.go new file mode 100644 index 000000000..e27f2d694 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/elasticquota.go @@ -0,0 +1,178 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + scheme "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ElasticQuotasGetter has a method to return a ElasticQuotaInterface. +// A group's client should implement this interface. +type ElasticQuotasGetter interface { + ElasticQuotas(namespace string) ElasticQuotaInterface +} + +// ElasticQuotaInterface has methods to work with ElasticQuota resources. 
+type ElasticQuotaInterface interface { + Create(ctx context.Context, elasticQuota *v1alpha1.ElasticQuota, opts v1.CreateOptions) (*v1alpha1.ElasticQuota, error) + Update(ctx context.Context, elasticQuota *v1alpha1.ElasticQuota, opts v1.UpdateOptions) (*v1alpha1.ElasticQuota, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ElasticQuota, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ElasticQuotaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ElasticQuota, err error) + ElasticQuotaExpansion +} + +// elasticQuotas implements ElasticQuotaInterface +type elasticQuotas struct { + client rest.Interface + ns string +} + +// newElasticQuotas returns a ElasticQuotas +func newElasticQuotas(c *SchedulingV1alpha1Client, namespace string) *elasticQuotas { + return &elasticQuotas{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the elasticQuota, and returns the corresponding elasticQuota object, and an error if there is any. +func (c *elasticQuotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ElasticQuota, err error) { + result = &v1alpha1.ElasticQuota{} + err = c.client.Get(). + Namespace(c.ns). + Resource("elasticquotas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ElasticQuotas that match those selectors. +func (c *elasticQuotas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ElasticQuotaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ElasticQuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("elasticquotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested elasticQuotas. +func (c *elasticQuotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("elasticquotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a elasticQuota and creates it. Returns the server's representation of the elasticQuota, and an error, if there is any. +func (c *elasticQuotas) Create(ctx context.Context, elasticQuota *v1alpha1.ElasticQuota, opts v1.CreateOptions) (result *v1alpha1.ElasticQuota, err error) { + result = &v1alpha1.ElasticQuota{} + err = c.client.Post(). + Namespace(c.ns). + Resource("elasticquotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(elasticQuota). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a elasticQuota and updates it. Returns the server's representation of the elasticQuota, and an error, if there is any. 
+func (c *elasticQuotas) Update(ctx context.Context, elasticQuota *v1alpha1.ElasticQuota, opts v1.UpdateOptions) (result *v1alpha1.ElasticQuota, err error) { + result = &v1alpha1.ElasticQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("elasticquotas"). + Name(elasticQuota.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(elasticQuota). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the elasticQuota and deletes it. Returns an error if one occurs. +func (c *elasticQuotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("elasticquotas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *elasticQuotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("elasticquotas"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched elasticQuota. +func (c *elasticQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ElasticQuota, err error) { + result = &v1alpha1.ElasticQuota{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("elasticquotas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/doc.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/doc.go new file mode 100644 index 000000000..a537e15bf --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_elasticquota.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_elasticquota.go new file mode 100644 index 000000000..68ddd11a0 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_elasticquota.go @@ -0,0 +1,130 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
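// Illustrative sketch (not part of the generated diff): using the typed ElasticQuota
// client defined above. NewForConfig comes from scheduling_client.go later in this
// package; the kubeconfig path, the "quotas-demo" namespace, and the "team-a" name are
// assumptions made only for this example.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	schedclient "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1"
)

func main() {
	// Build a rest.Config from the local kubeconfig (assumed to exist for this sketch).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	client, err := schedclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List all ElasticQuotas in one namespace, then fetch a single object by name.
	quotas, err := client.ElasticQuotas("quotas-demo").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for i := range quotas.Items {
		fmt.Println(quotas.Items[i].Namespace, quotas.Items[i].Name)
	}

	if eq, err := client.ElasticQuotas("quotas-demo").Get(context.TODO(), "team-a", metav1.GetOptions{}); err == nil {
		fmt.Println("found quota:", eq.Name)
	}
}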
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeElasticQuotas implements ElasticQuotaInterface +type FakeElasticQuotas struct { + Fake *FakeSchedulingV1alpha1 + ns string +} + +var elasticquotasResource = schema.GroupVersionResource{Group: "scheduling.sigs.k8s.io", Version: "v1alpha1", Resource: "elasticquotas"} + +var elasticquotasKind = schema.GroupVersionKind{Group: "scheduling.sigs.k8s.io", Version: "v1alpha1", Kind: "ElasticQuota"} + +// Get takes name of the elasticQuota, and returns the corresponding elasticQuota object, and an error if there is any. +func (c *FakeElasticQuotas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ElasticQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(elasticquotasResource, c.ns, name), &v1alpha1.ElasticQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ElasticQuota), err +} + +// List takes label and field selectors, and returns the list of ElasticQuotas that match those selectors. +func (c *FakeElasticQuotas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ElasticQuotaList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(elasticquotasResource, elasticquotasKind, c.ns, opts), &v1alpha1.ElasticQuotaList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ElasticQuotaList{ListMeta: obj.(*v1alpha1.ElasticQuotaList).ListMeta} + for _, item := range obj.(*v1alpha1.ElasticQuotaList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested elasticQuotas. +func (c *FakeElasticQuotas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(elasticquotasResource, c.ns, opts)) + +} + +// Create takes the representation of a elasticQuota and creates it. Returns the server's representation of the elasticQuota, and an error, if there is any. +func (c *FakeElasticQuotas) Create(ctx context.Context, elasticQuota *v1alpha1.ElasticQuota, opts v1.CreateOptions) (result *v1alpha1.ElasticQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(elasticquotasResource, c.ns, elasticQuota), &v1alpha1.ElasticQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ElasticQuota), err +} + +// Update takes the representation of a elasticQuota and updates it. Returns the server's representation of the elasticQuota, and an error, if there is any. 
+func (c *FakeElasticQuotas) Update(ctx context.Context, elasticQuota *v1alpha1.ElasticQuota, opts v1.UpdateOptions) (result *v1alpha1.ElasticQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(elasticquotasResource, c.ns, elasticQuota), &v1alpha1.ElasticQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ElasticQuota), err +} + +// Delete takes name of the elasticQuota and deletes it. Returns an error if one occurs. +func (c *FakeElasticQuotas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(elasticquotasResource, c.ns, name), &v1alpha1.ElasticQuota{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeElasticQuotas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(elasticquotasResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ElasticQuotaList{}) + return err +} + +// Patch applies the patch and returns the patched elasticQuota. +func (c *FakeElasticQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ElasticQuota, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(elasticquotasResource, c.ns, name, pt, data, subresources...), &v1alpha1.ElasticQuota{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ElasticQuota), err +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_podgroup.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_podgroup.go new file mode 100644 index 000000000..32e863080 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_podgroup.go @@ -0,0 +1,142 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePodGroups implements PodGroupInterface +type FakePodGroups struct { + Fake *FakeSchedulingV1alpha1 + ns string +} + +var podgroupsResource = schema.GroupVersionResource{Group: "scheduling.sigs.k8s.io", Version: "v1alpha1", Resource: "podgroups"} + +var podgroupsKind = schema.GroupVersionKind{Group: "scheduling.sigs.k8s.io", Version: "v1alpha1", Kind: "PodGroup"} + +// Get takes name of the podGroup, and returns the corresponding podGroup object, and an error if there is any. 
+func (c *FakePodGroups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(podgroupsResource, c.ns, name), &v1alpha1.PodGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodGroup), err +} + +// List takes label and field selectors, and returns the list of PodGroups that match those selectors. +func (c *FakePodGroups) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodGroupList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(podgroupsResource, podgroupsKind, c.ns, opts), &v1alpha1.PodGroupList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.PodGroupList{ListMeta: obj.(*v1alpha1.PodGroupList).ListMeta} + for _, item := range obj.(*v1alpha1.PodGroupList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podGroups. +func (c *FakePodGroups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podgroupsResource, c.ns, opts)) + +} + +// Create takes the representation of a podGroup and creates it. Returns the server's representation of the podGroup, and an error, if there is any. +func (c *FakePodGroups) Create(ctx context.Context, podGroup *v1alpha1.PodGroup, opts v1.CreateOptions) (result *v1alpha1.PodGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podgroupsResource, c.ns, podGroup), &v1alpha1.PodGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodGroup), err +} + +// Update takes the representation of a podGroup and updates it. Returns the server's representation of the podGroup, and an error, if there is any. +func (c *FakePodGroups) Update(ctx context.Context, podGroup *v1alpha1.PodGroup, opts v1.UpdateOptions) (result *v1alpha1.PodGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podgroupsResource, c.ns, podGroup), &v1alpha1.PodGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodGroup), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePodGroups) UpdateStatus(ctx context.Context, podGroup *v1alpha1.PodGroup, opts v1.UpdateOptions) (*v1alpha1.PodGroup, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(podgroupsResource, "status", c.ns, podGroup), &v1alpha1.PodGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodGroup), err +} + +// Delete takes name of the podGroup and deletes it. Returns an error if one occurs. +func (c *FakePodGroups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podgroupsResource, c.ns, name), &v1alpha1.PodGroup{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakePodGroups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podgroupsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.PodGroupList{}) + return err +} + +// Patch applies the patch and returns the patched podGroup. +func (c *FakePodGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodGroup, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(podgroupsResource, c.ns, name, pt, data, subresources...), &v1alpha1.PodGroup{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PodGroup), err +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go new file mode 100644 index 000000000..3e8bef128 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go @@ -0,0 +1,44 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeSchedulingV1alpha1 struct { + *testing.Fake +} + +func (c *FakeSchedulingV1alpha1) ElasticQuotas(namespace string) v1alpha1.ElasticQuotaInterface { + return &FakeElasticQuotas{c, namespace} +} + +func (c *FakeSchedulingV1alpha1) PodGroups(namespace string) v1alpha1.PodGroupInterface { + return &FakePodGroups{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeSchedulingV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..8d1454eb7 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
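// Illustrative sketch (not part of the generated diff): exercising the fake typed client
// above in a unit test. It assumes the standard client-gen versioned fake package
// (clientset/versioned/fake with NewSimpleClientset) accompanies these typed fakes;
// the namespace and quota name are assumptions made only for this example.
package example

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1"
	fakeclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake"
)

func TestElasticQuotaFakeClient(t *testing.T) {
	// Seed the fake clientset's object tracker with one ElasticQuota.
	eq := &v1alpha1.ElasticQuota{
		ObjectMeta: metav1.ObjectMeta{Namespace: "quotas-demo", Name: "team-a"},
	}
	client := fakeclientset.NewSimpleClientset(eq)

	// FakeElasticQuotas serves the Get from the tracker, no API server involved.
	got, err := client.SchedulingV1alpha1().ElasticQuotas("quotas-demo").Get(context.TODO(), "team-a", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "team-a" {
		t.Fatalf("expected team-a, got %s", got.Name)
	}
}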
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ElasticQuotaExpansion interface{} + +type PodGroupExpansion interface{} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go new file mode 100644 index 000000000..5334e7b8c --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go @@ -0,0 +1,195 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + scheme "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PodGroupsGetter has a method to return a PodGroupInterface. +// A group's client should implement this interface. +type PodGroupsGetter interface { + PodGroups(namespace string) PodGroupInterface +} + +// PodGroupInterface has methods to work with PodGroup resources. 
+type PodGroupInterface interface { + Create(ctx context.Context, podGroup *v1alpha1.PodGroup, opts v1.CreateOptions) (*v1alpha1.PodGroup, error) + Update(ctx context.Context, podGroup *v1alpha1.PodGroup, opts v1.UpdateOptions) (*v1alpha1.PodGroup, error) + UpdateStatus(ctx context.Context, podGroup *v1alpha1.PodGroup, opts v1.UpdateOptions) (*v1alpha1.PodGroup, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodGroup, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodGroupList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodGroup, err error) + PodGroupExpansion +} + +// podGroups implements PodGroupInterface +type podGroups struct { + client rest.Interface + ns string +} + +// newPodGroups returns a PodGroups +func newPodGroups(c *SchedulingV1alpha1Client, namespace string) *podGroups { + return &podGroups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podGroup, and returns the corresponding podGroup object, and an error if there is any. +func (c *podGroups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodGroup, err error) { + result = &v1alpha1.PodGroup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podgroups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodGroups that match those selectors. +func (c *podGroups) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodGroupList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PodGroupList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podgroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podGroups. +func (c *podGroups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podgroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a podGroup and creates it. Returns the server's representation of the podGroup, and an error, if there is any. +func (c *podGroups) Create(ctx context.Context, podGroup *v1alpha1.PodGroup, opts v1.CreateOptions) (result *v1alpha1.PodGroup, err error) { + result = &v1alpha1.PodGroup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podgroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podGroup). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a podGroup and updates it. Returns the server's representation of the podGroup, and an error, if there is any. 
+func (c *podGroups) Update(ctx context.Context, podGroup *v1alpha1.PodGroup, opts v1.UpdateOptions) (result *v1alpha1.PodGroup, err error) { + result = &v1alpha1.PodGroup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podgroups"). + Name(podGroup.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podGroup). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *podGroups) UpdateStatus(ctx context.Context, podGroup *v1alpha1.PodGroup, opts v1.UpdateOptions) (result *v1alpha1.PodGroup, err error) { + result = &v1alpha1.PodGroup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podgroups"). + Name(podGroup.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podGroup). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the podGroup and deletes it. Returns an error if one occurs. +func (c *podGroups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podgroups"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podGroups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podgroups"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched podGroup. +func (c *podGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodGroup, err error) { + result = &v1alpha1.PodGroup{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podgroups"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go new file mode 100644 index 000000000..f8ff9e274 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go @@ -0,0 +1,94 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type SchedulingV1alpha1Interface interface { + RESTClient() rest.Interface + ElasticQuotasGetter + PodGroupsGetter +} + +// SchedulingV1alpha1Client is used to interact with features provided by the scheduling.sigs.k8s.io group. +type SchedulingV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SchedulingV1alpha1Client) ElasticQuotas(namespace string) ElasticQuotaInterface { + return newElasticQuotas(c, namespace) +} + +func (c *SchedulingV1alpha1Client) PodGroups(namespace string) PodGroupInterface { + return newPodGroups(c, namespace) +} + +// NewForConfig creates a new SchedulingV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SchedulingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SchedulingV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SchedulingV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SchedulingV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SchedulingV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *SchedulingV1alpha1Client { + return &SchedulingV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SchedulingV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/factory.go b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/factory.go new file mode 100644 index 000000000..f61fdd42e --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + internalinterfaces "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/internalinterfaces" + scheduling "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Scheduling() scheduling.Interface +} + +func (f *sharedInformerFactory) Scheduling() scheduling.Interface { + return scheduling.New(f, f.namespace, f.tweakListOptions) +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/generic.go b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/generic.go new file mode 100644 index 000000000..2925fe2fa --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/generic.go @@ -0,0 +1,64 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
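// Illustrative sketch (not part of the generated diff): wiring the shared informer
// factory above to the generated clientset. It assumes a constructed versioned clientset
// and a stop channel are available; the 30-second resync and namespace are arbitrary
// values chosen only for this example.
package example

import (
	"time"

	"k8s.io/apimachinery/pkg/labels"

	versioned "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned"
	externalversions "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions"
)

func runInformers(client versioned.Interface, stopCh <-chan struct{}) {
	// Build a factory scoped to a single namespace (WithNamespace is optional).
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		client, 30*time.Second, externalversions.WithNamespace("quotas-demo"))

	// Requesting the informer registers it with the factory before Start is called.
	eqInformer := factory.Scheduling().V1alpha1().ElasticQuotas()

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// After the cache has synced, the lister serves reads from the local index.
	quotas, _ := eqInformer.Lister().ElasticQuotas("quotas-demo").List(labels.Everything())
	_ = quotas
}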
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=scheduling.sigs.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("elasticquotas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().ElasticQuotas().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("podgroups"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().PodGroups().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 000000000..287fd661e --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + versioned "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. 
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/interface.go b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/interface.go new file mode 100644 index 000000000..40e532f1f --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package scheduling + +import ( + internalinterfaces "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1/elasticquota.go b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1/elasticquota.go new file mode 100644 index 000000000..53e5ba49f --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1/elasticquota.go @@ -0,0 +1,90 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + schedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + versioned "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + internalinterfaces "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ElasticQuotaInformer provides access to a shared informer and lister for +// ElasticQuotas. +type ElasticQuotaInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ElasticQuotaLister +} + +type elasticQuotaInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewElasticQuotaInformer constructs a new informer for ElasticQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewElasticQuotaInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredElasticQuotaInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredElasticQuotaInformer constructs a new informer for ElasticQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredElasticQuotaInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SchedulingV1alpha1().ElasticQuotas(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SchedulingV1alpha1().ElasticQuotas(namespace).Watch(context.TODO(), options) + }, + }, + &schedulingv1alpha1.ElasticQuota{}, + resyncPeriod, + indexers, + ) +} + +func (f *elasticQuotaInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredElasticQuotaInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *elasticQuotaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&schedulingv1alpha1.ElasticQuota{}, f.defaultInformer) +} + +func (f *elasticQuotaInformer) Lister() v1alpha1.ElasticQuotaLister { + return v1alpha1.NewElasticQuotaLister(f.Informer().GetIndexer()) +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1/interface.go b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1/interface.go new file mode 100644 index 000000000..8dc28584d --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ElasticQuotas returns a ElasticQuotaInformer. + ElasticQuotas() ElasticQuotaInformer + // PodGroups returns a PodGroupInformer. + PodGroups() PodGroupInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ElasticQuotas returns a ElasticQuotaInformer. 
+func (v *version) ElasticQuotas() ElasticQuotaInformer { + return &elasticQuotaInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodGroups returns a PodGroupInformer. +func (v *version) PodGroups() PodGroupInformer { + return &podGroupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1/podgroup.go b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1/podgroup.go new file mode 100644 index 000000000..0a17303e9 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1/podgroup.go @@ -0,0 +1,90 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + schedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + versioned "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + internalinterfaces "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PodGroupInformer provides access to a shared informer and lister for +// PodGroups. +type PodGroupInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PodGroupLister +} + +type podGroupInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodGroupInformer constructs a new informer for PodGroup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodGroupInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodGroupInformer constructs a new informer for PodGroup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPodGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SchedulingV1alpha1().PodGroups(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SchedulingV1alpha1().PodGroups(namespace).Watch(context.TODO(), options) + }, + }, + &schedulingv1alpha1.PodGroup{}, + resyncPeriod, + indexers, + ) +} + +func (f *podGroupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodGroupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podGroupInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&schedulingv1alpha1.PodGroup{}, f.defaultInformer) +} + +func (f *podGroupInformer) Lister() v1alpha1.PodGroupLister { + return v1alpha1.NewPodGroupLister(f.Informer().GetIndexer()) +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1/elasticquota.go b/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1/elasticquota.go new file mode 100644 index 000000000..a9676af9b --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1/elasticquota.go @@ -0,0 +1,99 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ElasticQuotaLister helps list ElasticQuotas. +// All objects returned here must be treated as read-only. +type ElasticQuotaLister interface { + // List lists all ElasticQuotas in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ElasticQuota, err error) + // ElasticQuotas returns an object that can list and get ElasticQuotas. + ElasticQuotas(namespace string) ElasticQuotaNamespaceLister + ElasticQuotaListerExpansion +} + +// elasticQuotaLister implements the ElasticQuotaLister interface. +type elasticQuotaLister struct { + indexer cache.Indexer +} + +// NewElasticQuotaLister returns a new ElasticQuotaLister. +func NewElasticQuotaLister(indexer cache.Indexer) ElasticQuotaLister { + return &elasticQuotaLister{indexer: indexer} +} + +// List lists all ElasticQuotas in the indexer. 
+func (s *elasticQuotaLister) List(selector labels.Selector) (ret []*v1alpha1.ElasticQuota, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ElasticQuota)) + }) + return ret, err +} + +// ElasticQuotas returns an object that can list and get ElasticQuotas. +func (s *elasticQuotaLister) ElasticQuotas(namespace string) ElasticQuotaNamespaceLister { + return elasticQuotaNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ElasticQuotaNamespaceLister helps list and get ElasticQuotas. +// All objects returned here must be treated as read-only. +type ElasticQuotaNamespaceLister interface { + // List lists all ElasticQuotas in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ElasticQuota, err error) + // Get retrieves the ElasticQuota from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.ElasticQuota, error) + ElasticQuotaNamespaceListerExpansion +} + +// elasticQuotaNamespaceLister implements the ElasticQuotaNamespaceLister +// interface. +type elasticQuotaNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ElasticQuotas in the indexer for a given namespace. +func (s elasticQuotaNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ElasticQuota, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ElasticQuota)) + }) + return ret, err +} + +// Get retrieves the ElasticQuota from the indexer for a given namespace and name. +func (s elasticQuotaNamespaceLister) Get(name string) (*v1alpha1.ElasticQuota, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("elasticquota"), name) + } + return obj.(*v1alpha1.ElasticQuota), nil +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1/expansion_generated.go b/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1/expansion_generated.go new file mode 100644 index 000000000..375750835 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1/expansion_generated.go @@ -0,0 +1,35 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// ElasticQuotaListerExpansion allows custom methods to be added to +// ElasticQuotaLister. +type ElasticQuotaListerExpansion interface{} + +// ElasticQuotaNamespaceListerExpansion allows custom methods to be added to +// ElasticQuotaNamespaceLister. +type ElasticQuotaNamespaceListerExpansion interface{} + +// PodGroupListerExpansion allows custom methods to be added to +// PodGroupLister. 
+type PodGroupListerExpansion interface{} + +// PodGroupNamespaceListerExpansion allows custom methods to be added to +// PodGroupNamespaceLister. +type PodGroupNamespaceListerExpansion interface{} diff --git a/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1/podgroup.go b/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1/podgroup.go new file mode 100644 index 000000000..bac1f1b4b --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1/podgroup.go @@ -0,0 +1,99 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodGroupLister helps list PodGroups. +// All objects returned here must be treated as read-only. +type PodGroupLister interface { + // List lists all PodGroups in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.PodGroup, err error) + // PodGroups returns an object that can list and get PodGroups. + PodGroups(namespace string) PodGroupNamespaceLister + PodGroupListerExpansion +} + +// podGroupLister implements the PodGroupLister interface. +type podGroupLister struct { + indexer cache.Indexer +} + +// NewPodGroupLister returns a new PodGroupLister. +func NewPodGroupLister(indexer cache.Indexer) PodGroupLister { + return &podGroupLister{indexer: indexer} +} + +// List lists all PodGroups in the indexer. +func (s *podGroupLister) List(selector labels.Selector) (ret []*v1alpha1.PodGroup, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PodGroup)) + }) + return ret, err +} + +// PodGroups returns an object that can list and get PodGroups. +func (s *podGroupLister) PodGroups(namespace string) PodGroupNamespaceLister { + return podGroupNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodGroupNamespaceLister helps list and get PodGroups. +// All objects returned here must be treated as read-only. +type PodGroupNamespaceLister interface { + // List lists all PodGroups in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.PodGroup, err error) + // Get retrieves the PodGroup from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.PodGroup, error) + PodGroupNamespaceListerExpansion +} + +// podGroupNamespaceLister implements the PodGroupNamespaceLister +// interface. +type podGroupNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodGroups in the indexer for a given namespace. 
+func (s podGroupNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PodGroup, err error) {
+	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+		ret = append(ret, m.(*v1alpha1.PodGroup))
+	})
+	return ret, err
+}
+
+// Get retrieves the PodGroup from the indexer for a given namespace and name.
+func (s podGroupNamespaceLister) Get(name string) (*v1alpha1.PodGroup, error) {
+	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(v1alpha1.Resource("podgroup"), name)
+	}
+	return obj.(*v1alpha1.PodGroup), nil
+}
diff --git a/apis/thirdparty/scheduler-plugins/pkg/util/podgroup.go b/apis/thirdparty/scheduler-plugins/pkg/util/podgroup.go
new file mode 100644
index 000000000..b74e634b8
--- /dev/null
+++ b/apis/thirdparty/scheduler-plugins/pkg/util/podgroup.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2022 The Koordinator Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+
+	"github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1"
+)
+
+// DefaultWaitTime is 60s if ScheduleTimeoutSeconds is not specified.
+const DefaultWaitTime = 60 * time.Second
+
+// CreateMergePatch returns the patch generated from the original and new objects
+func CreateMergePatch(original, new interface{}) ([]byte, error) {
+	pvByte, err := json.Marshal(original)
+	if err != nil {
+		return nil, err
+	}
+	cloneByte, err := json.Marshal(new)
+	if err != nil {
+		return nil, err
+	}
+	patch, err := strategicpatch.CreateTwoWayMergePatch(pvByte, cloneByte, original)
+	if err != nil {
+		return nil, err
+	}
+	return patch, nil
+}
+
+// GetPodGroupLabel gets the pod group name from the pod's labels
+func GetPodGroupLabel(pod *v1.Pod) string {
+	return pod.Labels[v1alpha1.PodGroupLabel]
+}
+
+// GetPodGroupFullName gets the namespaced pod group name from the pod's labels
+func GetPodGroupFullName(pod *v1.Pod) string {
+	pgName := GetPodGroupLabel(pod)
+	if len(pgName) == 0 {
+		return ""
+	}
+	return fmt.Sprintf("%v/%v", pod.Namespace, pgName)
+}
+
+// GetWaitTimeDuration returns a wait timeout based on the following precedences:
+// 1. spec.scheduleTimeoutSeconds of the given pg, if specified
+// 2. the given scheduleTimeout, if not nil and non-zero
+// 3.
fall back to DefaultWaitTime +func GetWaitTimeDuration(pg *v1alpha1.PodGroup, scheduleTimeout *time.Duration) time.Duration { + if pg != nil && pg.Spec.ScheduleTimeoutSeconds != nil { + return time.Duration(*pg.Spec.ScheduleTimeoutSeconds) * time.Second + } + if scheduleTimeout != nil && *scheduleTimeout != 0 { + return *scheduleTimeout + } + return DefaultWaitTime +} diff --git a/apis/thirdparty/scheduler-plugins/pkg/util/resource.go b/apis/thirdparty/scheduler-plugins/pkg/util/resource.go new file mode 100644 index 000000000..2c9f0a3b7 --- /dev/null +++ b/apis/thirdparty/scheduler-plugins/pkg/util/resource.go @@ -0,0 +1,77 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/scheduler/framework" +) + +// ResourceList returns a resource list of this resource. +// Note: this code used to exist in k/k, but removed in k/k#101465. +func ResourceList(r *framework.Resource) v1.ResourceList { + result := v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI), + v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI), + } + for rName, rQuant := range r.ScalarResources { + if v1helper.IsHugePageResourceName(rName) { + result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI) + } else { + result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI) + } + } + return result +} + +// GetPodEffectiveRequest gets the effective request resource of a pod to the origin resource. +// The Pod's effective request is the higher of: +// - the sum of all app containers(spec.Containers) request for a resource. +// - the effective init containers(spec.InitContainers) request for a resource. +// The effective init containers request is the highest request on all init containers. 
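+// For example, a pod with two app containers that each request 500m of CPU and one init
+// container that requests 2 CPU has an effective CPU request of 2, i.e. max(2, 0.5+0.5);
+// the figures here are illustrative only.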
+func GetPodEffectiveRequest(pod *v1.Pod) v1.ResourceList { + initResources := make(v1.ResourceList) + resources := make(v1.ResourceList) + + for _, container := range pod.Spec.InitContainers { + for name, quantity := range container.Resources.Requests { + if q, ok := initResources[name]; ok && quantity.Cmp(q) <= 0 { + continue + } + initResources[name] = quantity + } + } + for _, container := range pod.Spec.Containers { + for name, quantity := range container.Resources.Requests { + if q, ok := resources[name]; ok { + quantity.Add(q) + } + resources[name] = quantity + } + } + for name, quantity := range initResources { + if q, ok := resources[name]; ok && quantity.Cmp(q) <= 0 { + continue + } + resources[name] = quantity + } + return resources +} diff --git a/cmd/koord-descheduler/app/options/insecure_serving.go b/cmd/koord-descheduler/app/options/insecure_serving.go index 0dd49333e..5aae630f2 100644 --- a/cmd/koord-descheduler/app/options/insecure_serving.go +++ b/cmd/koord-descheduler/app/options/insecure_serving.go @@ -31,8 +31,8 @@ import ( // CombinedInsecureServingOptions sets up to two insecure listeners for healthz and metrics. The flags // override the ComponentConfig and DeprecatedInsecureServingOptions values for both. type CombinedInsecureServingOptions struct { - Healthz *apiserveroptions.DeprecatedInsecureServingOptionsWithLoopback - Metrics *apiserveroptions.DeprecatedInsecureServingOptionsWithLoopback + Healthz *apiserveroptions.DeprecatedInsecureServingOptions + Metrics *apiserveroptions.DeprecatedInsecureServingOptions BindPort int // overrides the structs above on ApplyTo, ignored on ApplyToFromLoadedConfig BindAddress string // overrides the structs above on ApplyTo, ignored on ApplyToFromLoadedConfig @@ -56,11 +56,11 @@ func (o *CombinedInsecureServingOptions) applyTo(c *deschedulerappconfig.Config, updateAddressFromDeprecatedInsecureServingOptions(&componentConfig.HealthzBindAddress, o.Healthz) updateAddressFromDeprecatedInsecureServingOptions(&componentConfig.MetricsBindAddress, o.Metrics) - if err := o.Healthz.ApplyTo(&c.InsecureServing, &c.LoopbackClientConfig); err != nil { + if err := o.Healthz.ApplyTo(&c.InsecureServing); err != nil { return err } if o.Metrics != nil && (c.ComponentConfig.MetricsBindAddress != c.ComponentConfig.HealthzBindAddress || o.Healthz == nil) { - if err := o.Metrics.ApplyTo(&c.InsecureMetricsServing, &c.LoopbackClientConfig); err != nil { + if err := o.Metrics.ApplyTo(&c.InsecureMetricsServing); err != nil { return err } } @@ -100,7 +100,7 @@ func (o *CombinedInsecureServingOptions) ApplyToFromLoadedConfig(c *deschedulera return o.applyTo(c, componentConfig) } -func updateAddressFromDeprecatedInsecureServingOptions(addr *string, is *apiserveroptions.DeprecatedInsecureServingOptionsWithLoopback) { +func updateAddressFromDeprecatedInsecureServingOptions(addr *string, is *apiserveroptions.DeprecatedInsecureServingOptions) { if is == nil { *addr = "" return @@ -115,7 +115,7 @@ func updateAddressFromDeprecatedInsecureServingOptions(addr *string, is *apiserv } } -func updateDeprecatedInsecureServingOptionsFromAddress(is *apiserveroptions.DeprecatedInsecureServingOptionsWithLoopback, addr string) { +func updateDeprecatedInsecureServingOptionsFromAddress(is *apiserveroptions.DeprecatedInsecureServingOptions, addr string) { if is == nil { return } diff --git a/cmd/koord-descheduler/app/options/options.go b/cmd/koord-descheduler/app/options/options.go index 51a4ca2d4..d3859f97c 100644 --- a/cmd/koord-descheduler/app/options/options.go +++ 
b/cmd/koord-descheduler/app/options/options.go @@ -39,9 +39,12 @@ import ( componentbaseconfig "k8s.io/component-base/config" componentbaseoptions "k8s.io/component-base/config/options" "k8s.io/component-base/logs" + logsapi "k8s.io/component-base/logs/api/v1" "k8s.io/component-base/metrics" "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" deschedulerappconfig "github.com/koordinator-sh/koordinator/cmd/koord-descheduler/app/config" deschedulerconfig "github.com/koordinator-sh/koordinator/pkg/descheduler/apis/config" @@ -95,11 +98,11 @@ func NewOptions() *Options { ComponentConfig: cfg, SecureServing: apiserveroptions.NewSecureServingOptions().WithLoopback(), CombinedInsecureServing: &CombinedInsecureServingOptions{ - Healthz: (&apiserveroptions.DeprecatedInsecureServingOptions{ + Healthz: &apiserveroptions.DeprecatedInsecureServingOptions{ BindNetwork: "tcp", - }).WithLoopback(), - Metrics: (&apiserveroptions.DeprecatedInsecureServingOptions{ - BindNetwork: "tcp"}).WithLoopback(), + }, + Metrics: &apiserveroptions.DeprecatedInsecureServingOptions{ + BindNetwork: "tcp"}, }, LeaderElection: &componentbaseconfig.LeaderElectionConfiguration{ LeaderElect: true, @@ -170,7 +173,7 @@ func (o *Options) initFlags() { componentbaseoptions.BindLeaderElectionFlags(o.LeaderElection, nfs.FlagSet("leader election")) utilfeature.DefaultMutableFeatureGate.AddFlag(nfs.FlagSet("feature gate")) o.Metrics.AddFlags(nfs.FlagSet("metrics")) - o.Logs.AddFlags(nfs.FlagSet("logs")) + logsapi.AddFlags(o.Logs, nfs.FlagSet("logs")) o.Flags = &nfs } @@ -267,10 +270,10 @@ func (o *Options) Config() (*deschedulerappconfig.Config, error) { mgrKubeConfig.AcceptContentTypes = "" mgr, err := ctrl.NewManager(&mgrKubeConfig, ctrl.Options{ Scheme: scheme, - MetricsBindAddress: "0", + Metrics: metricsserver.Options{BindAddress: "0"}, HealthProbeBindAddress: "0", LeaderElection: false, - SyncPeriod: nil, + Cache: cache.Options{SyncPeriod: nil}, NewClient: utilclient.NewClient, }) if err != nil { diff --git a/cmd/koord-descheduler/app/server.go b/cmd/koord-descheduler/app/server.go index e81c93472..96df06ceb 100644 --- a/cmd/koord-descheduler/app/server.go +++ b/cmd/koord-descheduler/app/server.go @@ -28,6 +28,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/fields" utilerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" genericapifilters "k8s.io/apiserver/pkg/endpoints/filters" apirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/server" @@ -35,6 +36,7 @@ import ( "k8s.io/apiserver/pkg/server/healthz" "k8s.io/apiserver/pkg/server/mux" "k8s.io/apiserver/pkg/server/routes" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/events" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/record" @@ -42,6 +44,8 @@ import ( "k8s.io/component-base/cli/globalflag" "k8s.io/component-base/configz" "k8s.io/component-base/logs" + logsapi "k8s.io/component-base/logs/api/v1" + "k8s.io/component-base/metrics/features" "k8s.io/component-base/metrics/legacyregistry" "k8s.io/component-base/term" "k8s.io/component-base/version" @@ -63,6 +67,11 @@ import ( "github.com/koordinator-sh/koordinator/pkg/util/transformer" ) +func init() { + utilruntime.Must(logsapi.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)) + utilruntime.Must(features.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)) +} + // Option 
configures a framework.Registry. type Option func(frameworkruntime.Registry) error @@ -107,7 +116,7 @@ func NewDeschedulerCommand(registryOptions ...Option) *cobra.Command { func runCommand(cmd *cobra.Command, opts *options.Options, registryOptions ...Option) error { // Activate logging as soon as possible, after that // show flags with the final logging configuration. - if err := opts.Logs.ValidateAndApply(nil); err != nil { + if err := logsapi.ValidateAndApply(opts.Logs, utilfeature.DefaultFeatureGate); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } diff --git a/cmd/koord-manager/main.go b/cmd/koord-manager/main.go index 029ae9bb4..3f6d682a7 100644 --- a/cmd/koord-manager/main.go +++ b/cmd/koord-manager/main.go @@ -28,12 +28,13 @@ import ( "github.com/spf13/pflag" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/leaderelection/resourcelock" _ "k8s.io/component-base/metrics/prometheus/clientgo" // load restclient and workqueue metrics "k8s.io/klog/v2" "k8s.io/klog/v2/klogr" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "github.com/koordinator-sh/koordinator/cmd/koord-manager/extensions" "github.com/koordinator-sh/koordinator/cmd/koord-manager/options" @@ -70,7 +71,7 @@ func main() { flag.BoolVar(&enableLeaderElection, "enable-leader-election", true, "Whether you need to enable leader election.") flag.StringVar(&leaderElectionNamespace, "leader-election-namespace", "koordinator-system", "This determines the namespace in which the leader election configmap will be created, it will use in-cluster namespace if empty.") - flag.StringVar(&leaderElectResourceLock, "leader-elect-resource-lock", resourcelock.ConfigMapsLeasesResourceLock, + flag.StringVar(&leaderElectResourceLock, "leader-elect-resource-lock", "leases", "The leader election resource lock for controller manager. e.g. 'leases', 'configmaps', 'endpoints', 'endpointsleases', 'configmapsleases'") flag.StringVar(&namespace, "namespace", "", "Namespace if specified restricts the manager's cache to watch objects in the desired namespace. 
Defaults to all namespaces.")
@@ -116,18 +117,33 @@ func main() {
 			syncPeriod = &d
 		}
 	}
-	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+
+	mgrOpt := ctrl.Options{
 		Scheme: options.Scheme,
-		MetricsBindAddress: metricsAddr,
+		Metrics: metricsserver.Options{BindAddress: metricsAddr},
 		HealthProbeBindAddress: healthProbeAddr,
 		LeaderElection: enableLeaderElection,
 		LeaderElectionID: "koordinator-manager",
 		LeaderElectionNamespace: leaderElectionNamespace,
 		LeaderElectionResourceLock: leaderElectResourceLock,
-		Namespace: namespace,
-		SyncPeriod: syncPeriod,
+		Cache: cache.Options{SyncPeriod: syncPeriod},
 		NewClient: utilclient.NewClient,
-	})
+	}
+
+	if namespace != "" {
+		mgrOpt.Cache.DefaultNamespaces = map[string]cache.Config{}
+		mgrOpt.Cache.DefaultNamespaces[namespace] = cache.Config{}
+	}
+
+	installMetricsHandler(&mgrOpt)
+	ctx := ctrl.SetupSignalHandler()
+
+	if utilfeature.DefaultFeatureGate.Enabled(features.WebhookFramework) {
+		setupLog.Info("setup webhook opt")
+		webhook.SetupWithWebhookOpt(&mgrOpt)
+	}
+
+	mgr, err := ctrl.NewManager(cfg, mgrOpt)
 	if err != nil {
 		setupLog.Error(err, "unable to start manager")
 		os.Exit(1)
@@ -147,8 +163,6 @@ func main() {
 	extensions.PrepareExtensions(cfg, mgr)
 	// +kubebuilder:scaffold:builder
 
-	ctx := ctrl.SetupSignalHandler()
-
 	if utilfeature.DefaultFeatureGate.Enabled(features.WebhookFramework) {
 		setupLog.Info("setup webhook")
 		if err = webhook.SetupWithManager(mgr); err != nil {
@@ -175,11 +189,6 @@ func main() {
 		klog.V(4).Infof("webhook framework feature gate not enabled")
 	}
 
-	if err := installHTTPHandler(mgr); err != nil {
-		setupLog.Error(err, "unable to install http handler")
-		os.Exit(1)
-	}
-
 	setupLog.Info("starting manager")
 	extensions.StartExtensions(ctx, mgr)
 	if err := mgr.Start(ctx); err != nil {
@@ -197,17 +206,11 @@ func setRestConfig(c *rest.Config) {
 	}
 }
 
-func installHTTPHandler(mgr ctrl.Manager) error {
-	if err := mgr.AddMetricsExtraHandler(metrics.InternalHTTPPath, promhttp.HandlerFor(metrics.InternalRegistry, promhttp.HandlerOpts{})); err != nil {
-		return err
-	}
-	if err := mgr.AddMetricsExtraHandler(metrics.ExternalHTTPPath, promhttp.HandlerFor(metrics.ExternalRegistry, promhttp.HandlerOpts{})); err != nil {
-		return err
-	}
-	// merge internal, external and controller-runtime metrics
-	if err := mgr.AddMetricsExtraHandler(metrics.DefaultHTTPPath, promhttp.HandlerFor(
-		metricsutil.MergedGatherFunc(metrics.InternalRegistry, metrics.ExternalRegistry, ctrlmetrics.Registry), promhttp.HandlerOpts{})); err != nil {
-		return err
+func installMetricsHandler(mgrOpt *ctrl.Options) {
+	mgrOpt.Metrics.ExtraHandlers = map[string]http.Handler{
+		metrics.InternalHTTPPath: promhttp.HandlerFor(metrics.InternalRegistry, promhttp.HandlerOpts{}),
+		metrics.ExternalHTTPPath: promhttp.HandlerFor(metrics.ExternalRegistry, promhttp.HandlerOpts{}),
+		metrics.DefaultHTTPPath: promhttp.HandlerFor(
+			metricsutil.MergedGatherFunc(metrics.InternalRegistry, metrics.ExternalRegistry, ctrlmetrics.Registry), promhttp.HandlerOpts{}),
 	}
-	return nil
 }
diff --git a/cmd/koord-manager/options/scheme.go b/cmd/koord-manager/options/scheme.go
index c96361a18..0a40ca5a3 100644
--- a/cmd/koord-manager/options/scheme.go
+++ b/cmd/koord-manager/options/scheme.go
@@ -20,12 +20,12 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-	"sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1"
 
 	configv1alpha1 "github.com/koordinator-sh/koordinator/apis/config/v1alpha1"
 	quotav1alpha1
"github.com/koordinator-sh/koordinator/apis/quota/v1alpha1" schedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/scheduling/v1alpha1" slov1alpha1 "github.com/koordinator-sh/koordinator/apis/slo/v1alpha1" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) var Scheme = runtime.NewScheme() diff --git a/cmd/koord-scheduler/app/options/insecure_serving.go b/cmd/koord-scheduler/app/options/insecure_serving.go index 63a67ba1a..95f950c07 100644 --- a/cmd/koord-scheduler/app/options/insecure_serving.go +++ b/cmd/koord-scheduler/app/options/insecure_serving.go @@ -22,7 +22,6 @@ import ( "github.com/spf13/pflag" apiserveroptions "k8s.io/apiserver/pkg/server/options" - schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" schedulerappconfig "github.com/koordinator-sh/koordinator/cmd/koord-scheduler/app/config" ) @@ -30,7 +29,7 @@ import ( // CombinedInsecureServingOptions sets up to two insecure listeners for healthz and metrics. The flags // override the ComponentConfig and DeprecatedInsecureServingOptions values for both. type CombinedInsecureServingOptions struct { - Healthz *apiserveroptions.DeprecatedInsecureServingOptionsWithLoopback + Healthz *apiserveroptions.DeprecatedInsecureServingOptions BindPort int BindAddress string @@ -47,7 +46,7 @@ func (o *CombinedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) { } // ApplyTo applies the insecure serving options to the given scheduler app configuration, and updates the componentConfig. -func (o *CombinedInsecureServingOptions) ApplyTo(c *schedulerappconfig.Config, componentConfig *schedulerconfig.KubeSchedulerConfiguration) error { +func (o *CombinedInsecureServingOptions) ApplyTo(c *schedulerappconfig.Config) error { if o == nil { return nil } @@ -57,7 +56,7 @@ func (o *CombinedInsecureServingOptions) ApplyTo(c *schedulerappconfig.Config, c o.Healthz.BindAddress = net.ParseIP(o.BindAddress) } - if err := o.Healthz.ApplyTo(&c.InsecureServing, &c.LoopbackClientConfig); err != nil { + if err := o.Healthz.ApplyTo(&c.InsecureServing); err != nil { return err } return nil diff --git a/cmd/koord-scheduler/app/options/options.go b/cmd/koord-scheduler/app/options/options.go index 08526fe9f..df54c3edd 100644 --- a/cmd/koord-scheduler/app/options/options.go +++ b/cmd/koord-scheduler/app/options/options.go @@ -17,6 +17,8 @@ limitations under the License. 
package options import ( + "context" + "github.com/gin-gonic/gin" "k8s.io/apimachinery/pkg/runtime" apiserveroptions "k8s.io/apiserver/pkg/server/options" @@ -40,9 +42,9 @@ func NewOptions() *Options { options := &Options{ Options: scheduleroptions.NewOptions(), CombinedInsecureServing: &CombinedInsecureServingOptions{ - Healthz: (&apiserveroptions.DeprecatedInsecureServingOptions{ + Healthz: &apiserveroptions.DeprecatedInsecureServingOptions{ BindNetwork: "tcp", - }).WithLoopback(), + }, }, } options.CombinedInsecureServing.AddFlags(options.Flags.FlagSet("insecure serving")) @@ -56,8 +58,8 @@ func (o *Options) Validate() []error { } // Config return a scheduler config object -func (o *Options) Config() (*schedulerappconfig.Config, error) { - config, err := o.Options.Config() +func (o *Options) Config(ctx context.Context) (*schedulerappconfig.Config, error) { + config, err := o.Options.Config(ctx) if err != nil { return nil, err } @@ -87,7 +89,7 @@ func (o *Options) Config() (*schedulerappconfig.Config, error) { KoordinatorSharedInformerFactory: koordinatorSharedInformerFactory, } - if err := o.CombinedInsecureServing.ApplyTo(appConfig, &config.ComponentConfig); err != nil { + if err := o.CombinedInsecureServing.ApplyTo(appConfig); err != nil { return nil, err } diff --git a/cmd/koord-scheduler/app/server.go b/cmd/koord-scheduler/app/server.go index 47e02ceaa..e628d7e45 100644 --- a/cmd/koord-scheduler/app/server.go +++ b/cmd/koord-scheduler/app/server.go @@ -25,6 +25,7 @@ import ( goruntime "runtime" "github.com/spf13/cobra" + utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/authentication/authenticator" @@ -45,7 +46,10 @@ import ( "k8s.io/component-base/cli/globalflag" "k8s.io/component-base/configz" "k8s.io/component-base/logs" + logsapi "k8s.io/component-base/logs/api/v1" + "k8s.io/component-base/metrics/features" "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/component-base/metrics/prometheus/slis" "k8s.io/component-base/term" "k8s.io/component-base/version" "k8s.io/component-base/version/verflag" @@ -71,7 +75,8 @@ import ( ) func init() { - utilruntime.Must(logs.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)) + utilruntime.Must(logsapi.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)) + utilruntime.Must(features.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)) } // Option configures a framework.Registry. @@ -119,7 +124,7 @@ for cost reduction and efficiency enhancement. cliflag.SetUsageAndHelpFunc(cmd, *nfs, cols) if err := cmd.MarkFlagFilename("config", "yaml", "yml", "json"); err != nil { - klog.ErrorS(err, "Failed to mark flag filename") + klog.Background().Error(err, "Failed to mark flag filename") } return cmd @@ -134,7 +139,7 @@ func runCommand(cmd *cobra.Command, opts *options.Options, registryOptions ...Op // Activate logging as soon as possible, after that // show flags with the final logging configuration. - if err := opts.Logs.ValidateAndApply(utilfeature.DefaultFeatureGate); err != nil { + if err := logsapi.ValidateAndApply(opts.Logs, utilfeature.DefaultFeatureGate); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } @@ -152,16 +157,18 @@ func runCommand(cmd *cobra.Command, opts *options.Options, registryOptions ...Op if err != nil { return err } - + // add feature enablement metrics + utilfeature.DefaultMutableFeatureGate.AddMetrics() return Run(ctx, cc, sched, extendedHandle) } // Run executes the scheduler based on the given configuration. 
It only returns on error or when context is done. func Run(ctx context.Context, cc *schedulerserverconfig.CompletedConfig, sched *scheduler.Scheduler, extenderFactory *frameworkext.FrameworkExtenderFactory) error { + logger := klog.FromContext(ctx) // To help debugging, immediately log version - klog.V(1).InfoS("Starting Koordinator Scheduler version", "version", version.Get()) + logger.Info("Starting Koordinator Scheduler version", "version", version.Get()) - klog.InfoS("Golang settings", "GOGC", os.Getenv("GOGC"), "GOMAXPROCS", os.Getenv("GOMAXPROCS"), "GOTRACEBACK", os.Getenv("GOTRACEBACK")) + logger.Info("Golang settings", "GOGC", os.Getenv("GOGC"), "GOMAXPROCS", os.Getenv("GOMAXPROCS"), "GOTRACEBACK", os.Getenv("GOTRACEBACK")) // Configz registration. if cz, err := configz.New("componentconfig"); err == nil { @@ -170,8 +177,9 @@ func Run(ctx context.Context, cc *schedulerserverconfig.CompletedConfig, sched * return fmt.Errorf("unable to register configz: %s", err) } - // Prepare the event broadcaster. + // Start events processing pipeline. cc.EventBroadcaster.StartRecordingToSink(ctx.Done()) + defer cc.EventBroadcaster.Shutdown() // Setup healthz checks. var checks []healthz.HealthChecker @@ -193,13 +201,13 @@ func Run(ctx context.Context, cc *schedulerserverconfig.CompletedConfig, sched * // Start up the healthz server. if cc.InsecureServing != nil { - handler := buildHandlerChain(newAPIHandler(&cc.ComponentConfig, cc.InformerFactory, cc.ServicesEngine, sched, isLeader, false, checks...), nil, nil) + handler := buildHandlerChain(newHealthzAndMetricsHandler(&cc.ComponentConfig, cc.InformerFactory, cc.ServicesEngine, sched, isLeader, checks...), nil, nil) if err := cc.InsecureServing.Serve(handler, 0, ctx.Done()); err != nil { return fmt.Errorf("failed to start insecure server: %v", err) } } if cc.SecureServing != nil { - handler := buildHandlerChain(newAPIHandler(&cc.ComponentConfig, cc.InformerFactory, cc.ServicesEngine, sched, isLeader, false, checks...), cc.Authentication.Authenticator, cc.Authorization.Authorizer) + handler := buildHandlerChain(newHealthzAndMetricsHandler(&cc.ComponentConfig, cc.InformerFactory, cc.ServicesEngine, sched, isLeader, checks...), cc.Authentication.Authenticator, cc.Authorization.Authorizer) // TODO: handle stoppedCh and listenerStoppedCh returned by c.SecureServing.Serve if _, _, err := cc.SecureServing.Serve(handler, 0, ctx.Done()); err != nil { // fail early for secure handlers, removing the old error loop from above @@ -207,27 +215,43 @@ func Run(ctx context.Context, cc *schedulerserverconfig.CompletedConfig, sched * } } - // Start all informers. - cc.InformerFactory.Start(ctx.Done()) - // DynInformerFactory can be nil in tests. - if cc.DynInformerFactory != nil { - cc.DynInformerFactory.Start(ctx.Done()) - } - cc.KoordinatorSharedInformerFactory.Start(ctx.Done()) + startInformersAndWaitForSync := func(ctx context.Context) { + // Start all informers. + cc.InformerFactory.Start(ctx.Done()) + // DynInformerFactory can be nil in tests. + if cc.DynInformerFactory != nil { + cc.DynInformerFactory.Start(ctx.Done()) + } + cc.KoordinatorSharedInformerFactory.Start(ctx.Done()) - // Wait for all caches to sync before scheduling. - cc.InformerFactory.WaitForCacheSync(ctx.Done()) - // DynInformerFactory can be nil in tests. - if cc.DynInformerFactory != nil { - cc.DynInformerFactory.WaitForCacheSync(ctx.Done()) - } - cc.KoordinatorSharedInformerFactory.WaitForCacheSync(ctx.Done()) + // Wait for all caches to sync before scheduling. 
+ cc.InformerFactory.WaitForCacheSync(ctx.Done()) + // DynInformerFactory can be nil in tests. + if cc.DynInformerFactory != nil { + cc.DynInformerFactory.WaitForCacheSync(ctx.Done()) + } + cc.KoordinatorSharedInformerFactory.WaitForCacheSync(ctx.Done()) + + // Wait for all handlers to sync (all items in the initial list delivered) before scheduling. + if err := sched.WaitForHandlersSync(ctx); err != nil { + logger.Error(err, "waiting for handlers to sync") + } + logger.V(3).Info("Handlers synced") + } + if !cc.ComponentConfig.DelayCacheUntilActive || cc.LeaderElection == nil { + startInformersAndWaitForSync(ctx) + } // If leader election is enabled, runCommand via LeaderElector until done and exit. if cc.LeaderElection != nil { cc.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { close(waitingForLeader) + if cc.ComponentConfig.DelayCacheUntilActive { + logger.Info("Starting informers and waiting for sync...") + startInformersAndWaitForSync(ctx) + logger.Info("Sync completed") + } go extenderFactory.Run() sched.Run(ctx) }, @@ -235,11 +259,11 @@ func Run(ctx context.Context, cc *schedulerserverconfig.CompletedConfig, sched * select { case <-ctx.Done(): // We were asked to terminate. Exit 0. - klog.InfoS("Requested to terminate, exiting") + logger.Info("Requested to terminate, exiting") os.Exit(0) default: // We lost the lock. - klog.ErrorS(nil, "Leaderelection lost") + logger.Error(nil, "Leaderelection lost") klog.FlushAndExit(klog.ExitFlushTimeout, 1) asynclog.FlushAndExit() } @@ -290,30 +314,24 @@ func installMetricHandler(pathRecorderMux *mux.PathRecorderMux, informers inform }) } -func installProfilingHandler(pathRecorderMux *mux.PathRecorderMux, enableContentionProfiling bool) { - routes.Profiling{}.Install(pathRecorderMux) - if enableContentionProfiling { - goruntime.SetBlockProfileRate(1) - } - // NOTE: Use utilroutes.DebugFlags instead of k8s.io/apiserver/pkg/server/routes.DebugFlags - // as using the latter will print a useless stack when installing multiple flags - debugFlags := utilroutes.NewDebugFlags(pathRecorderMux) - debugFlags.Install("v", utilroutes.StringFlagPutHandler(logs.GlogSetter)) - debugFlags.Install("s", utilroutes.StringFlagPutHandler(frameworkext.DebugScoresSetter)) - debugFlags.Install("f", utilroutes.StringFlagPutHandler(frameworkext.DebugFiltersSetter)) -} - -// newAPIHandler creates a healthz server from the config, and will also -// embed the metrics handler if the healthz and metrics address configurations -// are the same. -func newAPIHandler(config *kubeschedulerconfig.KubeSchedulerConfiguration, informers informers.SharedInformerFactory, engine *services.Engine, sched *scheduler.Scheduler, isLeader func() bool, separateMetrics bool, checks ...healthz.HealthChecker) http.Handler { +// newHealthzAndMetricsHandler creates a healthz server from the config, and will also +// embed the metrics handler. +func newHealthzAndMetricsHandler(config *kubeschedulerconfig.KubeSchedulerConfiguration, informers informers.SharedInformerFactory, engine *services.Engine, sched *scheduler.Scheduler, isLeader func() bool, checks ...healthz.HealthChecker) http.Handler { pathRecorderMux := mux.NewPathRecorderMux("koord-scheduler") healthz.InstallHandler(pathRecorderMux, checks...) 
- if !separateMetrics { - installMetricHandler(pathRecorderMux, informers, isLeader) + installMetricHandler(pathRecorderMux, informers, isLeader) + if utilfeature.DefaultFeatureGate.Enabled(features.ComponentSLIs) { + slis.SLIMetricsWithReset{}.Install(pathRecorderMux) } if config.EnableProfiling { - installProfilingHandler(pathRecorderMux, config.EnableContentionProfiling) + routes.Profiling{}.Install(pathRecorderMux) + if config.EnableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + debugFlags := utilroutes.NewDebugFlags(pathRecorderMux) + debugFlags.Install("v", utilroutes.StringFlagPutHandler(logs.GlogSetter)) + debugFlags.Install("s", utilroutes.StringFlagPutHandler(frameworkext.DebugScoresSetter)) + debugFlags.Install("f", utilroutes.StringFlagPutHandler(frameworkext.DebugFiltersSetter)) } services.InstallAPIHandler(pathRecorderMux, engine, sched, isLeader) return pathRecorderMux @@ -345,7 +363,7 @@ func Setup(ctx context.Context, opts *options.Options, outOfTreeRegistryOptions return nil, nil, nil, utilerrors.NewAggregate(errs) } - c, err := opts.Config() + c, err := opts.Config(ctx) if err != nil { return nil, nil, nil, err } @@ -381,11 +399,11 @@ func Setup(ctx context.Context, opts *options.Options, outOfTreeRegistryOptions recorderFactory := getRecorderFactory(&cc) completedProfiles := make([]kubeschedulerconfig.KubeSchedulerProfile, 0) // Create the scheduler. - sched, err := scheduler.New(cc.Client, + sched, err := scheduler.New(ctx, + cc.Client, cc.InformerFactory, cc.DynInformerFactory, recorderFactory, - ctx.Done(), scheduler.WithComponentConfigVersion(cc.ComponentConfig.TypeMeta.APIVersion), scheduler.WithKubeConfig(cc.KubeConfig), scheduler.WithProfiles(cc.ComponentConfig.Profiles...), @@ -404,7 +422,7 @@ func Setup(ctx context.Context, opts *options.Options, outOfTreeRegistryOptions if err != nil { return nil, nil, nil, err } - if err := scheduleroptions.LogOrWriteConfig(opts.WriteConfigTo, &cc.ComponentConfig, completedProfiles); err != nil { + if err := scheduleroptions.LogOrWriteConfig(klog.FromContext(ctx), opts.WriteConfigTo, &cc.ComponentConfig, completedProfiles); err != nil { return nil, nil, nil, err } diff --git a/cmd/koordlet/main.go b/cmd/koordlet/main.go index c4c6420c7..5e4e1e551 100644 --- a/cmd/koordlet/main.go +++ b/cmd/koordlet/main.go @@ -24,7 +24,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/component-base/logs" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/manager/signals" @@ -40,7 +39,7 @@ import ( func main() { cfg := config.NewConfiguration() cfg.InitFlags(flag.CommandLine) - logs.AddGoFlags(flag.CommandLine) + klog.InitFlags(nil) flag.Parse() go wait.Forever(klog.Flush, 5*time.Second) diff --git a/config/crd/bases/analysis.koordinator.sh_recommendations.yaml b/config/crd/bases/analysis.koordinator.sh_recommendations.yaml index 38b6c6460..89773844d 100644 --- a/config/crd/bases/analysis.koordinator.sh_recommendations.yaml +++ b/config/crd/bases/analysis.koordinator.sh_recommendations.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: recommendations.analysis.koordinator.sh spec: group: analysis.koordinator.sh @@ -39,14 +38,19 @@ spec: description: Recommendation is the Schema for the recommendations API properties: apiVersion: - description: 'APIVersion defines the 
versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -65,25 +69,25 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If - the operator is In or NotIn, the values array must - be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced - during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -95,13 +99,13 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A - single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is "key", - the operator is "In", and the values array contains only - "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object type: object + x-kubernetes-map-type: atomic type: description: Type indicates the type of target type: string @@ -136,42 +140,42 @@ spec: status of the distribution items: description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -185,11 +189,12 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/config.koordinator.sh_clustercolocationprofiles.yaml b/config/crd/bases/config.koordinator.sh_clustercolocationprofiles.yaml index fb5de6334..82ba0d7f9 100644 --- a/config/crd/bases/config.koordinator.sh_clustercolocationprofiles.yaml +++ b/config/crd/bases/config.koordinator.sh_clustercolocationprofiles.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: clustercolocationprofiles.config.koordinator.sh spec: group: config.koordinator.sh @@ -22,14 +21,19 @@ spec: API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -39,10 +43,9 @@ spec: annotationKeysMapping: additionalProperties: type: string - description: AnnotationKeysMapping describes the annotations that - needs to inject into Pod.Annotations with the same values. It sets - the Pod.Annotations[AnnotationsToAnnotations[k]] = Pod.Annotations[k] - for each key k. 
+ description: |- + AnnotationKeysMapping describes the annotations that needs to inject into Pod.Annotations with the same values. + It sets the Pod.Annotations[AnnotationsToAnnotations[k]] = Pod.Annotations[k] for each key k. type: object annotations: additionalProperties: @@ -51,20 +54,20 @@ spec: into Pod.Annotations type: object koordinatorPriority: - description: KoordinatorPriority defines the Pod sub-priority in Koordinator. + description: |- + KoordinatorPriority defines the Pod sub-priority in Koordinator. The priority value will be injected into Pod as label koordinator.sh/priority. Various Koordinator components determine the priority of the Pod - in the Koordinator through KoordinatorPriority and the priority - value in PriorityClassName. The higher the value, the higher the - priority. + in the Koordinator through KoordinatorPriority and the priority value in PriorityClassName. + The higher the value, the higher the priority. format: int32 type: integer labelKeysMapping: additionalProperties: type: string - description: LabelKeysMapping describes the labels that needs to inject - into Pod.Labels with the same values. It sets the Pod.Labels[LabelsToLabels[k]] - = Pod.Labels[k] for each key k. + description: |- + LabelKeysMapping describes the labels that needs to inject into Pod.Labels with the same values. + It sets the Pod.Labels[LabelsToLabels[k]] = Pod.Labels[k] for each key k. type: object labels: additionalProperties: @@ -73,32 +76,33 @@ spec: Pod.Labels type: object namespaceSelector: - description: NamespaceSelector decides whether to mutate/validate - Pods if the namespace matches the selector. Default to the empty - LabelSelector, which matches everything. + description: |- + NamespaceSelector decides whether to mutate/validate Pods if the + namespace matches the selector. + Default to the empty LabelSelector, which matches everything. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string @@ -111,23 +115,24 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. 
A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic patch: description: Patch indicates patching podTemplate that will be injected to the Pod. x-kubernetes-preserve-unknown-fields: true priorityClassName: - description: If specified, the priorityClassName and the priority - value defined in PriorityClass will be injected into the Pod. The - PriorityClassName, priority value in PriorityClassName and KoordinatorPriority - will affect the scheduling, preemption and other behaviors of Koordinator - system. + description: |- + If specified, the priorityClassName and the priority value defined in PriorityClass + will be injected into the Pod. + The PriorityClassName, priority value in PriorityClassName and + KoordinatorPriority will affect the scheduling, preemption and + other behaviors of Koordinator system. type: string probability: anyOf: @@ -137,8 +142,9 @@ spec: probability. x-kubernetes-int-or-string: true qosClass: - description: QoSClass describes the type of Koordinator QoS that the - Pod is running. The value will be injected into Pod as label koordinator.sh/qosClass. + description: |- + QoSClass describes the type of Koordinator QoS that the Pod is running. + The value will be injected into Pod as label koordinator.sh/qosClass. Options are LSE/LSR/LS/BE/SYSTEM. enum: - LSE @@ -152,32 +158,33 @@ spec: scheduler. type: string selector: - description: Selector decides whether to mutate/validate Pods if the - Pod matches the selector. Default to the empty LabelSelector, which - matches everything. + description: |- + Selector decides whether to mutate/validate Pods if the + Pod matches the selector. + Default to the empty LabelSelector, which matches everything. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. 
items: type: string @@ -190,13 +197,13 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic type: object status: description: ClusterColocationProfileStatus represents information about diff --git a/config/crd/bases/quota.koordinator.sh_elasticquotaprofiles.yaml b/config/crd/bases/quota.koordinator.sh_elasticquotaprofiles.yaml index 14e135676..16ebcf731 100644 --- a/config/crd/bases/quota.koordinator.sh_elasticquotaprofiles.yaml +++ b/config/crd/bases/quota.koordinator.sh_elasticquotaprofiles.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: elasticquotaprofiles.quota.koordinator.sh spec: group: quota.koordinator.sh @@ -22,14 +21,19 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -42,24 +46,24 @@ spec: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector that - contains values, a key, and an operator that relates the key - and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship to - a set of values. Valid operators are In, NotIn, Exists - and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. If the - operator is In or NotIn, the values array must be non-empty. - If the operator is Exists or DoesNotExist, the values - array must be empty. This array is replaced during a strategic + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic merge patch. items: type: string @@ -72,13 +76,13 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. A single - {key,value} in the matchLabels map is equivalent to an element - of matchExpressions, whose key field is "key", the operator - is "In", and the values array contains only "value". The requirements - are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic quotaLabels: additionalProperties: type: string @@ -88,9 +92,9 @@ spec: description: QuotaName defines the associated quota name of the profile. type: string resourceRatio: - description: ResourceRatio is a ratio, we will use it to fix the resource - fragmentation problem. If the total resource is 100 and the resource - ratio is 0.9, the allocable resource is 100*0.9=90 + description: |- + ResourceRatio is a ratio, we will use it to fix the resource fragmentation problem. + If the total resource is 100 and the resource ratio is 0.9, the allocable resource is 100*0.9=90 type: string required: - nodeSelector diff --git a/config/crd/bases/scheduling.koordinator.sh_devices.yaml b/config/crd/bases/scheduling.koordinator.sh_devices.yaml index 164aa41e9..12faf7bfe 100644 --- a/config/crd/bases/scheduling.koordinator.sh_devices.yaml +++ b/config/crd/bases/scheduling.koordinator.sh_devices.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: devices.scheduling.koordinator.sh spec: group: scheduling.koordinator.sh @@ -20,14 +19,19 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object diff --git a/config/crd/bases/scheduling.koordinator.sh_podmigrationjobs.yaml b/config/crd/bases/scheduling.koordinator.sh_podmigrationjobs.yaml index e0bf9245b..42712b666 100644 --- a/config/crd/bases/scheduling.koordinator.sh_podmigrationjobs.yaml +++ b/config/crd/bases/scheduling.koordinator.sh_podmigrationjobs.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: podmigrationjobs.scheduling.koordinator.sh spec: group: scheduling.koordinator.sh @@ -52,14 +51,19 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -70,44 +74,49 @@ spec: Pod and preempted Pods properties: apiVersion: - description: 'APIVersion defines the versioned schema of this - representation of an object. Servers should convert recognized - schemas to the latest internal value, and may reject unrecognized - values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string dryRun: - description: 'When present, indicates that modifications should - not be persisted. An invalid or unrecognized dryRun directive - will result in an error response and no further processing of - the request. 
Valid values are: - All: all dry run stages will - be processed' + description: |- + When present, indicates that modifications should not be + persisted. An invalid or unrecognized dryRun directive will + result in an error response and no further processing of the + request. Valid values are: + - All: all dry run stages will be processed items: type: string type: array gracePeriodSeconds: - description: The duration in seconds before the object should - be deleted. Value must be non-negative integer. The value zero - indicates delete immediately. If this value is nil, the default - grace period for the specified type will be used. Defaults to - a per object value if not specified. zero means delete immediately. + description: |- + The duration in seconds before the object should be deleted. Value must be non-negative integer. + The value zero indicates delete immediately. If this value is nil, the default grace period for the + specified type will be used. + Defaults to a per object value if not specified. zero means delete immediately. format: int64 type: integer kind: - description: 'Kind is a string value representing the REST resource - this object represents. Servers may infer this from the endpoint - the client submits requests to. Cannot be updated. In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string orphanDependents: - description: 'Deprecated: please use the PropagationPolicy, this - field will be deprecated in 1.7. Should the dependent objects - be orphaned. If true/false, the "orphan" finalizer will be added - to/removed from the object''s finalizers list. Either this field - or PropagationPolicy may be set, but not both.' + description: |- + Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + Should the dependent objects be orphaned. If true/false, the "orphan" + finalizer will be added to/removed from the object's finalizers list. + Either this field or PropagationPolicy may be set, but not both. type: boolean preconditions: - description: Must be fulfilled before a deletion is carried out. - If not possible, a 409 Conflict status will be returned. + description: |- + Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + returned. properties: resourceVersion: description: Specifies the target ResourceVersion @@ -117,23 +126,26 @@ spec: type: string type: object propagationPolicy: - description: 'Whether and how garbage collection will be performed. + description: |- + Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. - The default policy is decided by the existing finalizer set - in the metadata.finalizers and the resource-specific default - policy. Acceptable values are: ''Orphan'' - orphan the dependents; - ''Background'' - allow the garbage collector to delete the dependents - in the background; ''Foreground'' - a cascading policy that - deletes all dependents in the foreground.' + The default policy is decided by the existing finalizer set in the + metadata.finalizers and the resource-specific default policy. 
+ Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + allow the garbage collector to delete the dependents in the background; + 'Foreground' - a cascading policy that deletes all dependents in the + foreground. type: string type: object mode: - description: Mode represents the operating mode of the Job Default - is PodMigrationJobModeReservationFirst + description: |- + Mode represents the operating mode of the Job + Default is PodMigrationJobModeReservationFirst type: string paused: - description: Paused indicates whether the PodMigrationJob should to - work or not. Default is false + description: |- + Paused indicates whether the PodMigrationJob should work or not. + Default is false type: boolean podRef: description: PodRef represents the Pod that be migrated @@ -142,84 +154,99 @@ spec: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic reservationOptions: description: ReservationOptions defines the Reservation options for migrated Pod properties: preemptionOptions: - description: PreemptionOption decides whether to preempt other - Pods. The preemption is safe and reserves resources for preempted - Pods. + description: |- + PreemptionOption decides whether to preempt other Pods. + The preemption is safe and reserves resources for preempted Pods. type: object reservationRef: - description: ReservationRef if specified, PodMigrationJob will - check if the status of Reservation is available. ReservationRef - if not specified, PodMigrationJob controller will create Reservation - by Template, and update the ReservationRef to reference the - Reservation + description: |- + ReservationRef if specified, PodMigrationJob will check if the status of Reservation is available. + ReservationRef if not specified, PodMigrationJob controller will create Reservation by Template, + and update the ReservationRef to reference the Reservation properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part - of an object. TODO: this design is not final and this field - is subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic template: description: Template is the object that describes the Reservation that will be created if not specified ReservationRef @@ -257,8 +284,9 @@ spec: last transition. type: string status: - description: Status is the status of the condition. Can be True, - False, Unknown. + description: |- + Status is the status of the condition. + Can be True, False, Unknown. type: string type: description: Type is the type of the condition. @@ -276,9 +304,9 @@ spec: description: NodeName represents the node's name of migrated Pod type: string phase: - description: PodMigrationJobPhase represents the phase of a PodMigrationJob - is a simple, high-level summary of where the PodMigrationJob is - in its lifecycle. e.g. Pending/Running/Failed + description: |- + PodMigrationJobPhase represents the phase of a PodMigrationJob is a simple, high-level summary of where the PodMigrationJob is in its lifecycle. + e.g. Pending/Running/Failed type: string podRef: description: PodRef represents the newly created Pod after being migrated @@ -287,97 +315,106 @@ spec: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). 
This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic preemptedPodsRef: description: PreemptedPodsRef represents the Pods that be preempted items: - description: "ObjectReference contains enough information to let - you inspect or modify the referred object. --- New uses of this - type are discouraged because of difficulty describing its usage - when embedded in APIs. 1. Ignored fields. It includes many fields - which are not generally honored. For instance, ResourceVersion - and FieldPath are both very rarely valid in actual usage. 2. Invalid - usage help. It is impossible to add specific help for individual - usage. In most embedded usages, there are particular restrictions - like, \"must refer only to types A and B\" or \"UID not honored\" - or \"name must be restricted\". Those cannot be well described - when embedded. 3. Inconsistent validation. Because the usages - are different, the validation rules are different by usage, which - makes it hard for users to predict what will happen. 4. The fields - are both imprecise and overly precise. Kind is not a precise - mapping to a URL. This can produce ambiguity during interpretation - and require a REST mapping. In most cases, the dependency is - on the group,resource tuple and the version of the actual struct - is irrelevant. 5. We cannot easily change it. Because this type - is embedded in many locations, updates to this type will affect - numerous schemas. Don't make new APIs embed an underspecified - API type they do not control. \n Instead of using this type, create - a locally provided and used type that is well-focused on your - reference. 
For example, ServiceReferences for admission registration: - https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 - ." + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic type: array preemptedPodsReservation: description: PreemptedPodsReservations records information about Reservations @@ -401,68 +438,66 @@ spec: description: PodsRef represents the newly created Pods after being preempted items: - description: "ObjectReference contains enough information - to let you inspect or modify the referred object. --- New - uses of this type are discouraged because of difficulty - describing its usage when embedded in APIs. 1. Ignored fields. - \ It includes many fields which are not generally honored. - \ For instance, ResourceVersion and FieldPath are both very - rarely valid in actual usage. 2. Invalid usage help. It - is impossible to add specific help for individual usage. - \ In most embedded usages, there are particular restrictions - like, \"must refer only to types A and B\" or \"UID not - honored\" or \"name must be restricted\". Those cannot be - well described when embedded. 3. Inconsistent validation. - \ Because the usages are different, the validation rules - are different by usage, which makes it hard for users to - predict what will happen. 4. The fields are both imprecise - and overly precise. Kind is not a precise mapping to a - URL. This can produce ambiguity during interpretation and - require a REST mapping. In most cases, the dependency is - on the group,resource tuple and the version of the actual - struct is irrelevant. 5. We cannot easily change it. Because - this type is embedded in many locations, updates to this - type will affect numerous schemas. Don't make new APIs - embed an underspecified API type they do not control. \n - Instead of using this type, create a locally provided and - used type that is well-focused on your reference. For example, - ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 - ." + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. 
+ --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this - pod). This syntax is chosen only to have some well-defined - way of referencing a part of an object. TODO: this design - is not final and this field is subject to change in - the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic type: array preemptedPodRef: description: PreemptedPodRef represents the Pod that be preempted @@ -471,36 +506,43 @@ spec: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this pod). - This syntax is chosen only to have some well-defined way - of referencing a part of an object. TODO: this design - is not final and this field is subject to change in the - future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic type: object type: array reason: @@ -508,7 +550,8 @@ spec: details about why the PodMigrationJob is in this state. type: string status: - description: Status represents the current status of PodMigrationJob + description: |- + Status represents the current status of PodMigrationJob e.g. ReservationCreated type: string type: object diff --git a/config/crd/bases/scheduling.koordinator.sh_reservations.yaml b/config/crd/bases/scheduling.koordinator.sh_reservations.yaml index 54bec4c0f..67cddeefe 100644 --- a/config/crd/bases/scheduling.koordinator.sh_reservations.yaml +++ b/config/crd/bases/scheduling.koordinator.sh_reservations.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: reservations.scheduling.koordinator.sh spec: group: scheduling.koordinator.sh @@ -35,19 +34,25 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: Reservation is the Schema for the reservation API. A Reservation - object is non-namespaced. Any namespaced affinity/anti-affinity of reservation - scheduling can be specified in the spec.template. + description: |- + Reservation is the Schema for the reservation API. + A Reservation object is non-namespaced. + Any namespaced affinity/anti-affinity of reservation scheduling can be specified in the spec.template. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -55,9 +60,9 @@ spec: properties: allocateOnce: default: true - description: When `AllocateOnce` is set, the reserved resources are - only available for the first owner who allocates successfully and - are not allocatable to other owners anymore. Defaults to true. + description: |- + When `AllocateOnce` is set, the reserved resources are only available for the first owner who allocates successfully + and are not allocatable to other owners anymore. Defaults to true. type: boolean allocatePolicy: description: AllocatePolicy represents the allocation policy of reserved @@ -67,14 +72,15 @@ spec: - Restricted type: string expires: - description: Expired timestamp when the reservation is expected to - expire. If both `expires` and `ttl` are set, `expires` is checked - first. `expires` and `ttl` are mutually exclusive. Defaults to being - set dynamically at runtime based on the `ttl`. + description: |- + Expired timestamp when the reservation is expected to expire. + If both `expires` and `ttl` are set, `expires` is checked first. + `expires` and `ttl` are mutually exclusive. Defaults to being set dynamically at runtime based on the `ttl`. format: date-time type: string owners: - description: Specify the owners who can allocate the reserved resources. + description: |- + Specify the owners who can allocate the reserved resources. Multiple owner selectors and ORed. items: description: ReservationOwner indicates the owner specification @@ -87,29 +93,36 @@ spec: description: API version of the referent. type: string blockOwnerDeletion: - description: If true, AND if the owner has the "foregroundDeletion" - finalizer, then the owner cannot be deleted from the key-value - store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion - for how the garbage collector interacts with this field - and enforces the foreground deletion. Defaults to false. - To set this field, a user needs "delete" permission of - the owner, otherwise 422 (Unprocessable Entity) will be - returned. + description: |- + If true, AND if the owner has the "foregroundDeletion" finalizer, then + the owner cannot be deleted from the key-value store until this + reference is removed. + See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion + for how the garbage collector interacts with this field and enforces the foreground deletion. + Defaults to false. + To set this field, a user needs "delete" permission of the owner, + otherwise 422 (Unprocessable Entity) will be returned. type: boolean controller: description: If true, this reference points to the managing controller. type: boolean kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names type: string namespace: type: string uid: - description: 'UID of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids type: string required: - apiVersion @@ -117,35 +130,36 @@ spec: - name - uid type: object + x-kubernetes-map-type: atomic labelSelector: - description: A label selector is a label query over a set of - resources. The result of matchLabels and matchExpressions - are ANDed. An empty label selector matches all objects. A - null label selector matches no objects. + description: |- + A label selector is a label query over a set of resources. The result of matchLabels and + matchExpressions are ANDed. An empty label selector matches all objects. A null + label selector matches no objects. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: A label selector requirement is a selector - that contains values, a key, and an operator that relates - the key and values. + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: key: description: key is the label key that the selector applies to. type: string operator: - description: operator represents a key's relationship - to a set of values. Valid operators are In, NotIn, - Exists and DoesNotExist. + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: values is an array of string values. - If the operator is In or NotIn, the values array - must be non-empty. If the operator is Exists or - DoesNotExist, the values array must be empty. This - array is replaced during a strategic merge patch. + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: type: string type: array @@ -157,13 +171,13 @@ spec: matchLabels: additionalProperties: type: string - description: matchLabels is a map of {key,value} pairs. - A single {key,value} in the matchLabels map is equivalent - to an element of matchExpressions, whose key field is - "key", the operator is "In", and the values array contains - only "value". The requirements are ANDed. + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object + x-kubernetes-map-type: atomic object: description: Multiple field selectors are ANDed. properties: @@ -171,59 +185,65 @@ spec: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead - of an entire object, this string should contain a valid - JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container - within a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that - triggered the event) or if no container name is specified - "spec.containers[2]" (container with index 2 in this pod). 
- This syntax is chosen only to have some well-defined way - of referencing a part of an object. TODO: this design - is not final and this field is subject to change in the - future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic type: object minItems: 1 type: array preAllocation: - description: By default, the resources requirements of reservation - (specified in `template.spec`) is filtered by whether the node has - sufficient free resources (i.e. Reservation Request < Node Free). - When `preAllocation` is set, the scheduler will skip this validation - and allow overcommitment. The scheduled reservation would be waiting - to be available until free resources are sufficient. + description: |- + By default, the resources requirements of reservation (specified in `template.spec`) is filtered by whether the + node has sufficient free resources (i.e. Reservation Request < Node Free). + When `preAllocation` is set, the scheduler will skip this validation and allow overcommitment. The scheduled + reservation would be waiting to be available until free resources are sufficient. type: boolean template: - description: Template defines the scheduling requirements (resources, - affinities, images, ...) 
processed by the scheduler just like a - normal pod. If the `template.spec.nodeName` is specified, the scheduler - will not choose another node but reserve resources on the specified - node. + description: |- + Template defines the scheduling requirements (resources, affinities, images, ...) processed by the scheduler just + like a normal pod. + If the `template.spec.nodeName` is specified, the scheduler will not choose another node but reserve resources on + the specified node. x-kubernetes-preserve-unknown-fields: true ttl: default: 24h - description: Time-to-Live period for the reservation. `expires` and - `ttl` are mutually exclusive. Defaults to 24h. Set 0 to disable - expiration. + description: |- + Time-to-Live period for the reservation. + `expires` and `ttl` are mutually exclusive. Defaults to 24h. Set 0 to disable expiration. type: string unschedulable: description: Unschedulable controls reservation schedulability of @@ -278,72 +298,74 @@ spec: description: Current resource owners which allocated the reservation resources. items: - description: "ObjectReference contains enough information to let - you inspect or modify the referred object. --- New uses of this - type are discouraged because of difficulty describing its usage - when embedded in APIs. 1. Ignored fields. It includes many fields - which are not generally honored. For instance, ResourceVersion - and FieldPath are both very rarely valid in actual usage. 2. Invalid - usage help. It is impossible to add specific help for individual - usage. In most embedded usages, there are particular restrictions - like, \"must refer only to types A and B\" or \"UID not honored\" - or \"name must be restricted\". Those cannot be well described - when embedded. 3. Inconsistent validation. Because the usages - are different, the validation rules are different by usage, which - makes it hard for users to predict what will happen. 4. The fields - are both imprecise and overly precise. Kind is not a precise - mapping to a URL. This can produce ambiguity during interpretation - and require a REST mapping. In most cases, the dependency is - on the group,resource tuple and the version of the actual struct - is irrelevant. 5. We cannot easily change it. Because this type - is embedded in many locations, updates to this type will affect - numerous schemas. Don't make new APIs embed an underspecified - API type they do not control. \n Instead of using this type, create - a locally provided and used type that is well-focused on your - reference. For example, ServiceReferences for admission registration: - https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 - ." + description: |- + ObjectReference contains enough information to let you inspect or modify the referred object. + --- + New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. + 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. + 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular + restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". + Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. + 4. 
The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity + during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple + and the version of the actual struct is irrelevant. + 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. + + + Instead of using this type, create a locally provided and used type that is well-focused on your reference. + For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . properties: apiVersion: description: API version of the referent. type: string fieldPath: - description: 'If referring to a piece of an object instead of - an entire object, this string should contain a valid JSON/Go - field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within - a pod, this would take on a value like: "spec.containers{name}" - (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" - (container with index 2 in this pod). This syntax is chosen - only to have some well-defined way of referencing a part of - an object. TODO: this design is not final and this field is - subject to change in the future.' + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. type: string kind: - description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string namespace: - description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ type: string resourceVersion: - description: 'Specific resourceVersion to which this reference - is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency type: string uid: - description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids type: string type: object + x-kubernetes-map-type: atomic type: array nodeName: description: Name of node the reservation is scheduled on. type: string phase: - description: The `phase` indicates whether is reservation is waiting - for process, available to allocate or failed/expired to get cleanup. + description: |- + The `phase` indicates whether is reservation is waiting for process, available to allocate or failed/expired to + get cleanup. type: string type: object type: object diff --git a/config/crd/bases/scheduling.sigs.k8s.io_elasticquotas.yaml b/config/crd/bases/scheduling.sigs.k8s.io_elasticquotas.yaml index 24fb7ed8b..26972a840 100644 --- a/config/crd/bases/scheduling.sigs.k8s.io_elasticquotas.yaml +++ b/config/crd/bases/scheduling.sigs.k8s.io_elasticquotas.yaml @@ -3,9 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: https://github.com/kubernetes-sigs/scheduler-plugins/pull/52 # edited manually - controller-gen.kubebuilder.io/version: v0.6.2 - creationTimestamp: null + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/scheduler-plugins/pull/52 + controller-gen.kubebuilder.io/version: v0.14.0 name: elasticquotas.scheduling.sigs.k8s.io spec: group: scheduling.sigs.k8s.io @@ -14,9 +13,6 @@ spec: listKind: ElasticQuotaList plural: elasticquotas singular: elasticquota - shortNames: # edited manually - - eq # edited manually - - eqs # edited manually scope: Namespaced versions: - name: v1alpha1 @@ -25,14 +21,19 @@ spec: description: ElasticQuota sets elastic quota restrictions per namespace properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -46,9 +47,9 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: Max is the set of desired max limits for each named resource. - The usage of max is based on the resource configurations of successfully - scheduled pods. + description: |- + Max is the set of desired max limits for each named resource. The usage of max is based on the resource configurations of + successfully scheduled pods. type: object min: additionalProperties: @@ -78,9 +79,3 @@ spec: type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/config/crd/bases/scheduling.sigs.k8s.io_podgroups.yaml b/config/crd/bases/scheduling.sigs.k8s.io_podgroups.yaml index 613242c7d..b8d164d85 100644 --- a/config/crd/bases/scheduling.sigs.k8s.io_podgroups.yaml +++ b/config/crd/bases/scheduling.sigs.k8s.io_podgroups.yaml @@ -3,9 +3,8 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - api-approved.kubernetes.io: https://github.com/kubernetes-sigs/scheduler-plugins/pull/50 # edited manually - controller-gen.kubebuilder.io/version: v0.6.2 - creationTimestamp: null + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/scheduler-plugins/pull/52 + controller-gen.kubebuilder.io/version: v0.14.0 name: podgroups.scheduling.sigs.k8s.io spec: group: scheduling.sigs.k8s.io @@ -13,93 +12,93 @@ spec: kind: PodGroup listKind: PodGroupList plural: podgroups - shortNames: - - pg - - pgs singular: podgroup scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: PodGroup is a collection of Pod; used for batch workload. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Specification of the desired behavior of the pod group. - properties: - minMember: - description: MinMember defines the minimal number of members/tasks - to run the pod group; if there's not enough resources to start all - tasks, the scheduler will not start anyone. - format: int32 - type: integer - minResources: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: MinResources defines the minimal resource of members/tasks - to run the pod group; if there's not enough resources to start all - tasks, the scheduler will not start anyone. 
- type: object - scheduleTimeoutSeconds: - description: ScheduleTimeoutSeconds defines the maximal time of members/tasks - to wait before run the pod group; - format: int32 - type: integer - type: object - status: - description: Status represents the current information about a pod group. - This data may not be up to date. - properties: - failed: - description: The number of pods which reached phase Failed. - format: int32 - type: integer - occupiedBy: - description: OccupiedBy marks the workload (e.g., deployment, statefulset) - UID that occupy the podgroup. It is empty if not initialized. - type: string - phase: - description: Current phase of PodGroup. - type: string - running: - description: The number of actively running pods. - format: int32 - type: integer - scheduleStartTime: - description: ScheduleStartTime of the group - format: date-time - type: string - scheduled: - description: The number of actively running pods. - format: int32 - type: integer - succeeded: - description: The number of pods which reached phase Succeeded. - format: int32 - type: integer - type: object - type: object - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] + - name: v1alpha1 + schema: + openAPIV3Schema: + description: PodGroup is a collection of Pod; used for batch workload. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Specification of the desired behavior of the pod group. + properties: + minMember: + description: |- + MinMember defines the minimal number of members/tasks to run the pod group; + if there's not enough resources to start all tasks, the scheduler + will not start anyone. + format: int32 + type: integer + minResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + MinResources defines the minimal resource of members/tasks to run the pod group; + if there's not enough resources to start all tasks, the scheduler + will not start anyone. + type: object + scheduleTimeoutSeconds: + description: ScheduleTimeoutSeconds defines the maximal time of members/tasks + to wait before run the pod group; + format: int32 + type: integer + type: object + status: + description: |- + Status represents the current information about a pod group. + This data may not be up to date. + properties: + failed: + description: The number of pods which reached phase Failed. + format: int32 + type: integer + occupiedBy: + description: |- + OccupiedBy marks the workload (e.g., deployment, statefulset) UID that occupy the podgroup. + It is empty if not initialized. + type: string + phase: + description: Current phase of PodGroup. 
+ type: string + running: + description: The number of actively running pods. + format: int32 + type: integer + scheduleStartTime: + description: ScheduleStartTime of the group + format: date-time + type: string + scheduled: + description: The number of actively running pods. + format: int32 + type: integer + succeeded: + description: The number of pods which reached phase Succeeded. + format: int32 + type: integer + type: object + type: object + served: true + storage: true diff --git a/config/crd/bases/slo.koordinator.sh_nodemetrics.yaml b/config/crd/bases/slo.koordinator.sh_nodemetrics.yaml index b23ea1139..5f0c4d2cc 100644 --- a/config/crd/bases/slo.koordinator.sh_nodemetrics.yaml +++ b/config/crd/bases/slo.koordinator.sh_nodemetrics.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: nodemetrics.slo.koordinator.sh spec: group: slo.koordinator.sh @@ -21,14 +20,19 @@ spec: description: NodeMetric is the Schema for the nodemetrics API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -346,8 +350,9 @@ spec: type: object type: array aggregatedSystemUsages: - description: AggregatedSystemUsages will report only if there - are enough samples Deleted pods will be excluded during aggregation + description: |- + AggregatedSystemUsages will report only if there are enough samples + Deleted pods will be excluded during aggregation items: properties: duration: diff --git a/config/crd/bases/slo.koordinator.sh_nodeslos.yaml b/config/crd/bases/slo.koordinator.sh_nodeslos.yaml index e279fddaa..88f1757b4 100644 --- a/config/crd/bases/slo.koordinator.sh_nodeslos.yaml +++ b/config/crd/bases/slo.koordinator.sh_nodeslos.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.0 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.14.0 name: nodeslos.slo.koordinator.sh spec: group: slo.koordinator.sh @@ -21,14 +20,19 @@ spec: description: NodeSLO is the Schema for the nodeslos API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -115,9 +119,9 @@ spec: ioCfg: properties: enableUserModel: - description: 'configure the cost model of blkio-cost - manually whether the user model is enabled. - Default value: false' + description: |- + configure the cost model of blkio-cost manually + whether the user model is enabled. Default value: false type: boolean ioWeightPercent: description: 'This field is used to set the @@ -160,31 +164,25 @@ spec: minimum: 1 type: integer readBPS: - description: Throttling of throughput The value - is set to 0, which indicates that the feature - is disabled. + description: |- + Throttling of throughput + The value is set to 0, which indicates that the feature is disabled. format: int64 minimum: 0 type: integer readIOPS: - description: Throttling of IOPS The value is - set to 0, which indicates that the feature - is disabled. + description: |- + Throttling of IOPS + The value is set to 0, which indicates that the feature is disabled. 
format: int64 minimum: 0 type: integer readLatency: - description: 'Configure the weight-based throttling - feature of blk-iocost Only used for RootClass - After blk-iocost is enabled, the kernel calculates - the proportion of requests that exceed the - read or write latency threshold out of all - requests. When the proportion is greater than - the read or write latency percentile (95%), - the kernel considers the disk to be saturated - and reduces the rate at which requests are - sent to the disk. the read latency threshold. - Unit: microseconds.' + description: |- + Configure the weight-based throttling feature of blk-iocost + Only used for RootClass + After blk-iocost is enabled, the kernel calculates the proportion of requests that exceed the read or write latency threshold out of all requests. When the proportion is greater than the read or write latency percentile (95%), the kernel considers the disk to be saturated and reduces the rate at which requests are sent to the disk. + the read latency threshold. Unit: microseconds. format: int64 type: integer readLatencyPercent: @@ -226,25 +224,26 @@ spec: description: CPUQOSCfg stores node-level config of cpu qos properties: coreExpeller: - description: 'whether pods of the QoS class can expel - the cgroup idle pods at the SMT-level. default = false - If set to true, pods of this QoS will use a dedicated - core sched group for noise clean with the SchedIdle - pods. NOTE: It takes effect if cpuPolicy = "coreSched".' + description: |- + whether pods of the QoS class can expel the cgroup idle pods at the SMT-level. default = false + If set to true, pods of this QoS will use a dedicated core sched group for noise clean with the SchedIdle pods. + NOTE: It takes effect if cpuPolicy = "coreSched". type: boolean enable: description: Enable indicates whether the cpu qos is enabled. type: boolean groupIdentity: - description: 'group identity value for pods, default = - 0 NOTE: It takes effect if cpuPolicy = "groupIdentity".' + description: |- + group identity value for pods, default = 0 + NOTE: It takes effect if cpuPolicy = "groupIdentity". format: int64 type: integer schedIdle: - description: 'cpu.idle value for pods, default = 0. `1` - means using SCHED_IDLE. CGroup Idle (introduced since - mainline Linux 5.15): https://lore.kernel.org/lkml/162971078674.25758.15464079371945307825.tip-bot2@tip-bot2/#r - NOTE: It takes effect if cpuPolicy = "coreSched".' + description: |- + cpu.idle value for pods, default = 0. + `1` means using SCHED_IDLE. + CGroup Idle (introduced since mainline Linux 5.15): https://lore.kernel.org/lkml/162971078674.25758.15464079371945307825.tip-bot2@tip-bot2/#r + NOTE: It takes effect if cpuPolicy = "coreSched". format: int64 type: integer type: object @@ -253,41 +252,33 @@ spec: qos properties: enable: - description: 'Enable indicates whether the memory qos - is enabled (default: false). This field is used for - node-level control, while pod-level configuration is - done with MemoryQOS and `Policy` instead of an `Enable` - option. Please view the differences between MemoryQOSCfg - and PodMemoryQOSConfig structs.' + description: |- + Enable indicates whether the memory qos is enabled (default: false). + This field is used for node-level control, while pod-level configuration is done with MemoryQOS and `Policy` + instead of an `Enable` option. Please view the differences between MemoryQOSCfg and PodMemoryQOSConfig structs. 
type: boolean lowLimitPercent: - description: 'LowLimitPercent specifies the lowLimitFactor - percentage to calculate `memory.low`, which TRIES BEST - protecting memory from global reclamation when memory - usage does not exceed the low limit unless no unprotected - memcg can be reclaimed. NOTE: `memory.low` should be - larger than `memory.min`. If spec.requests.memory == - spec.limits.memory, pod `memory.low` and `memory.high` - become invalid, while `memory.wmark_ratio` is still - in effect. Close: 0.' + description: |- + LowLimitPercent specifies the lowLimitFactor percentage to calculate `memory.low`, which TRIES BEST + protecting memory from global reclamation when memory usage does not exceed the low limit unless no unprotected + memcg can be reclaimed. + NOTE: `memory.low` should be larger than `memory.min`. If spec.requests.memory == spec.limits.memory, + pod `memory.low` and `memory.high` become invalid, while `memory.wmark_ratio` is still in effect. + Close: 0. format: int64 minimum: 0 type: integer minLimitPercent: - description: 'memcg qos If enabled, memcg qos will be - set by the agent, where some fields are implicitly calculated - from pod spec. 1. `memory.min` := spec.requests.memory - * minLimitFactor / 100 (use 0 if requests.memory is - not set) 2. `memory.low` := spec.requests.memory * lowLimitFactor - / 100 (use 0 if requests.memory is not set) 3. `memory.limit_in_bytes` - := spec.limits.memory (set $node.allocatable.memory - if limits.memory is not set) 4. `memory.high` := floor[(spec.requests.memory - + throttlingFactor / 100 * (memory.limit_in_bytes or - node allocatable memory - spec.requests.memory))/pageSize] - * pageSize MinLimitPercent specifies the minLimitFactor - percentage to calculate `memory.min`, which protects - memory from global reclamation when memory usage does - not exceed the min limit. Close: 0.' + description: |- + memcg qos + If enabled, memcg qos will be set by the agent, where some fields are implicitly calculated from pod spec. + 1. `memory.min` := spec.requests.memory * minLimitFactor / 100 (use 0 if requests.memory is not set) + 2. `memory.low` := spec.requests.memory * lowLimitFactor / 100 (use 0 if requests.memory is not set) + 3. `memory.limit_in_bytes` := spec.limits.memory (set $node.allocatable.memory if limits.memory is not set) + 4. `memory.high` := floor[(spec.requests.memory + throttlingFactor / 100 * (memory.limit_in_bytes or node allocatable memory - spec.requests.memory))/pageSize] * pageSize + MinLimitPercent specifies the minLimitFactor percentage to calculate `memory.min`, which protects memory + from global reclamation when memory usage does not exceed the min limit. + Close: 0. format: int64 minimum: 0 type: integer @@ -303,48 +294,46 @@ spec: format: int64 type: integer throttlingPercent: - description: 'ThrottlingPercent specifies the throttlingFactor - percentage to calculate `memory.high` with pod memory.limits - or node allocatable memory, which triggers memcg direct - reclamation when memory usage exceeds. Lower the factor - brings more heavier reclaim pressure. Close: 0.' + description: |- + ThrottlingPercent specifies the throttlingFactor percentage to calculate `memory.high` with pod + memory.limits or node allocatable memory, which triggers memcg direct reclamation when memory usage exceeds. + Lower the factor brings more heavier reclaim pressure. + Close: 0. 
format: int64 minimum: 0 type: integer wmarkMinAdj: - description: 'wmark_min_adj (Anolis OS required) WmarkMinAdj - specifies `memory.wmark_min_adj` which adjusts per-memcg - threshold for global memory reclamation. Lower the factor - brings later reclamation. The adjustment uses different - formula for different value range. [-25, 0):global_wmark_min'' - = global_wmark_min + (global_wmark_min - 0) * wmarkMinAdj - (0, 50]:global_wmark_min'' = global_wmark_min + (global_wmark_low - - global_wmark_min) * wmarkMinAdj Close: [LSR:0, LS:0, - BE:0]. Recommended: [LSR:-25, LS:-25, BE:50].' + description: |- + wmark_min_adj (Anolis OS required) + WmarkMinAdj specifies `memory.wmark_min_adj` which adjusts per-memcg threshold for global memory + reclamation. Lower the factor brings later reclamation. + The adjustment uses different formula for different value range. + [-25, 0):global_wmark_min' = global_wmark_min + (global_wmark_min - 0) * wmarkMinAdj + (0, 50]:global_wmark_min' = global_wmark_min + (global_wmark_low - global_wmark_min) * wmarkMinAdj + Close: [LSR:0, LS:0, BE:0]. Recommended: [LSR:-25, LS:-25, BE:50]. format: int64 maximum: 50 minimum: -25 type: integer wmarkRatio: - description: 'wmark_ratio (Anolis OS required) Async memory - reclamation is triggered when cgroup memory usage exceeds - `memory.wmark_high` and the reclamation stops when usage - is below `memory.wmark_low`. Basically, `memory.wmark_high` - := min(memory.high, memory.limit_in_bytes) * memory.memory.wmark_ratio - `memory.wmark_low` := min(memory.high, memory.limit_in_bytes) - * (memory.wmark_ratio - memory.wmark_scale_factor) WmarkRatio - specifies `memory.wmark_ratio` that help calculate `memory.wmark_high`, - which triggers async memory reclamation when memory - usage exceeds. Close: 0. Recommended: 95.' + description: |- + wmark_ratio (Anolis OS required) + Async memory reclamation is triggered when cgroup memory usage exceeds `memory.wmark_high` and the reclamation + stops when usage is below `memory.wmark_low`. Basically, + `memory.wmark_high` := min(memory.high, memory.limit_in_bytes) * memory.memory.wmark_ratio + `memory.wmark_low` := min(memory.high, memory.limit_in_bytes) * (memory.wmark_ratio - memory.wmark_scale_factor) + WmarkRatio specifies `memory.wmark_ratio` that help calculate `memory.wmark_high`, which triggers async + memory reclamation when memory usage exceeds. + Close: 0. Recommended: 95. format: int64 maximum: 100 minimum: 0 type: integer wmarkScalePermill: - description: 'WmarkScalePermill specifies `memory.wmark_scale_factor` - that helps calculate `memory.wmark_low`, which stops - async memory reclamation when memory usage belows. Close: - 50. Recommended: 20.' + description: |- + WmarkScalePermill specifies `memory.wmark_scale_factor` that helps calculate `memory.wmark_low`, which + stops async memory reclamation when memory usage belows. + Close: 50. Recommended: 20. format: int64 maximum: 1000 minimum: 1 @@ -357,24 +346,22 @@ spec: - type: integer - type: string default: 100 - description: 'EgressLimit describes the maximum network - bandwidth can be used in the egress direction, unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' 
+ description: |- + EgressLimit describes the maximum network bandwidth can be used in the egress direction, + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true egressRequest: anyOf: - type: integer - type: string default: 0 - description: 'EgressRequest describes the minimum network - bandwidth guaranteed in the egress direction. unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + EgressRequest describes the minimum network bandwidth guaranteed in the egress direction. + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true enable: type: boolean @@ -383,24 +370,22 @@ spec: - type: integer - type: string default: 100 - description: 'IngressLimit describes the maximum network - bandwidth can be used in the ingress direction, unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + IngressLimit describes the maximum network bandwidth can be used in the ingress direction, + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true ingressRequest: anyOf: - type: integer - type: string default: 0 - description: 'IngressRequest describes the minimum network - bandwidth guaranteed in the ingress direction. unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + IngressRequest describes the minimum network bandwidth guaranteed in the ingress direction. + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true type: object resctrlQOS: @@ -442,9 +427,9 @@ spec: ioCfg: properties: enableUserModel: - description: 'configure the cost model of blkio-cost - manually whether the user model is enabled. - Default value: false' + description: |- + configure the cost model of blkio-cost manually + whether the user model is enabled. Default value: false type: boolean ioWeightPercent: description: 'This field is used to set the @@ -487,31 +472,25 @@ spec: minimum: 1 type: integer readBPS: - description: Throttling of throughput The value - is set to 0, which indicates that the feature - is disabled. + description: |- + Throttling of throughput + The value is set to 0, which indicates that the feature is disabled. format: int64 minimum: 0 type: integer readIOPS: - description: Throttling of IOPS The value is - set to 0, which indicates that the feature - is disabled. + description: |- + Throttling of IOPS + The value is set to 0, which indicates that the feature is disabled. 
format: int64 minimum: 0 type: integer readLatency: - description: 'Configure the weight-based throttling - feature of blk-iocost Only used for RootClass - After blk-iocost is enabled, the kernel calculates - the proportion of requests that exceed the - read or write latency threshold out of all - requests. When the proportion is greater than - the read or write latency percentile (95%), - the kernel considers the disk to be saturated - and reduces the rate at which requests are - sent to the disk. the read latency threshold. - Unit: microseconds.' + description: |- + Configure the weight-based throttling feature of blk-iocost + Only used for RootClass + After blk-iocost is enabled, the kernel calculates the proportion of requests that exceed the read or write latency threshold out of all requests. When the proportion is greater than the read or write latency percentile (95%), the kernel considers the disk to be saturated and reduces the rate at which requests are sent to the disk. + the read latency threshold. Unit: microseconds. format: int64 type: integer readLatencyPercent: @@ -553,25 +532,26 @@ spec: description: CPUQOSCfg stores node-level config of cpu qos properties: coreExpeller: - description: 'whether pods of the QoS class can expel - the cgroup idle pods at the SMT-level. default = false - If set to true, pods of this QoS will use a dedicated - core sched group for noise clean with the SchedIdle - pods. NOTE: It takes effect if cpuPolicy = "coreSched".' + description: |- + whether pods of the QoS class can expel the cgroup idle pods at the SMT-level. default = false + If set to true, pods of this QoS will use a dedicated core sched group for noise clean with the SchedIdle pods. + NOTE: It takes effect if cpuPolicy = "coreSched". type: boolean enable: description: Enable indicates whether the cpu qos is enabled. type: boolean groupIdentity: - description: 'group identity value for pods, default = - 0 NOTE: It takes effect if cpuPolicy = "groupIdentity".' + description: |- + group identity value for pods, default = 0 + NOTE: It takes effect if cpuPolicy = "groupIdentity". format: int64 type: integer schedIdle: - description: 'cpu.idle value for pods, default = 0. `1` - means using SCHED_IDLE. CGroup Idle (introduced since - mainline Linux 5.15): https://lore.kernel.org/lkml/162971078674.25758.15464079371945307825.tip-bot2@tip-bot2/#r - NOTE: It takes effect if cpuPolicy = "coreSched".' + description: |- + cpu.idle value for pods, default = 0. + `1` means using SCHED_IDLE. + CGroup Idle (introduced since mainline Linux 5.15): https://lore.kernel.org/lkml/162971078674.25758.15464079371945307825.tip-bot2@tip-bot2/#r + NOTE: It takes effect if cpuPolicy = "coreSched". format: int64 type: integer type: object @@ -580,41 +560,33 @@ spec: qos properties: enable: - description: 'Enable indicates whether the memory qos - is enabled (default: false). This field is used for - node-level control, while pod-level configuration is - done with MemoryQOS and `Policy` instead of an `Enable` - option. Please view the differences between MemoryQOSCfg - and PodMemoryQOSConfig structs.' + description: |- + Enable indicates whether the memory qos is enabled (default: false). + This field is used for node-level control, while pod-level configuration is done with MemoryQOS and `Policy` + instead of an `Enable` option. Please view the differences between MemoryQOSCfg and PodMemoryQOSConfig structs. 
type: boolean lowLimitPercent: - description: 'LowLimitPercent specifies the lowLimitFactor - percentage to calculate `memory.low`, which TRIES BEST - protecting memory from global reclamation when memory - usage does not exceed the low limit unless no unprotected - memcg can be reclaimed. NOTE: `memory.low` should be - larger than `memory.min`. If spec.requests.memory == - spec.limits.memory, pod `memory.low` and `memory.high` - become invalid, while `memory.wmark_ratio` is still - in effect. Close: 0.' + description: |- + LowLimitPercent specifies the lowLimitFactor percentage to calculate `memory.low`, which TRIES BEST + protecting memory from global reclamation when memory usage does not exceed the low limit unless no unprotected + memcg can be reclaimed. + NOTE: `memory.low` should be larger than `memory.min`. If spec.requests.memory == spec.limits.memory, + pod `memory.low` and `memory.high` become invalid, while `memory.wmark_ratio` is still in effect. + Close: 0. format: int64 minimum: 0 type: integer minLimitPercent: - description: 'memcg qos If enabled, memcg qos will be - set by the agent, where some fields are implicitly calculated - from pod spec. 1. `memory.min` := spec.requests.memory - * minLimitFactor / 100 (use 0 if requests.memory is - not set) 2. `memory.low` := spec.requests.memory * lowLimitFactor - / 100 (use 0 if requests.memory is not set) 3. `memory.limit_in_bytes` - := spec.limits.memory (set $node.allocatable.memory - if limits.memory is not set) 4. `memory.high` := floor[(spec.requests.memory - + throttlingFactor / 100 * (memory.limit_in_bytes or - node allocatable memory - spec.requests.memory))/pageSize] - * pageSize MinLimitPercent specifies the minLimitFactor - percentage to calculate `memory.min`, which protects - memory from global reclamation when memory usage does - not exceed the min limit. Close: 0.' + description: |- + memcg qos + If enabled, memcg qos will be set by the agent, where some fields are implicitly calculated from pod spec. + 1. `memory.min` := spec.requests.memory * minLimitFactor / 100 (use 0 if requests.memory is not set) + 2. `memory.low` := spec.requests.memory * lowLimitFactor / 100 (use 0 if requests.memory is not set) + 3. `memory.limit_in_bytes` := spec.limits.memory (set $node.allocatable.memory if limits.memory is not set) + 4. `memory.high` := floor[(spec.requests.memory + throttlingFactor / 100 * (memory.limit_in_bytes or node allocatable memory - spec.requests.memory))/pageSize] * pageSize + MinLimitPercent specifies the minLimitFactor percentage to calculate `memory.min`, which protects memory + from global reclamation when memory usage does not exceed the min limit. + Close: 0. format: int64 minimum: 0 type: integer @@ -630,48 +602,46 @@ spec: format: int64 type: integer throttlingPercent: - description: 'ThrottlingPercent specifies the throttlingFactor - percentage to calculate `memory.high` with pod memory.limits - or node allocatable memory, which triggers memcg direct - reclamation when memory usage exceeds. Lower the factor - brings more heavier reclaim pressure. Close: 0.' + description: |- + ThrottlingPercent specifies the throttlingFactor percentage to calculate `memory.high` with pod + memory.limits or node allocatable memory, which triggers memcg direct reclamation when memory usage exceeds. + Lower the factor brings more heavier reclaim pressure. + Close: 0. 
format: int64 minimum: 0 type: integer wmarkMinAdj: - description: 'wmark_min_adj (Anolis OS required) WmarkMinAdj - specifies `memory.wmark_min_adj` which adjusts per-memcg - threshold for global memory reclamation. Lower the factor - brings later reclamation. The adjustment uses different - formula for different value range. [-25, 0):global_wmark_min'' - = global_wmark_min + (global_wmark_min - 0) * wmarkMinAdj - (0, 50]:global_wmark_min'' = global_wmark_min + (global_wmark_low - - global_wmark_min) * wmarkMinAdj Close: [LSR:0, LS:0, - BE:0]. Recommended: [LSR:-25, LS:-25, BE:50].' + description: |- + wmark_min_adj (Anolis OS required) + WmarkMinAdj specifies `memory.wmark_min_adj` which adjusts per-memcg threshold for global memory + reclamation. Lower the factor brings later reclamation. + The adjustment uses different formula for different value range. + [-25, 0):global_wmark_min' = global_wmark_min + (global_wmark_min - 0) * wmarkMinAdj + (0, 50]:global_wmark_min' = global_wmark_min + (global_wmark_low - global_wmark_min) * wmarkMinAdj + Close: [LSR:0, LS:0, BE:0]. Recommended: [LSR:-25, LS:-25, BE:50]. format: int64 maximum: 50 minimum: -25 type: integer wmarkRatio: - description: 'wmark_ratio (Anolis OS required) Async memory - reclamation is triggered when cgroup memory usage exceeds - `memory.wmark_high` and the reclamation stops when usage - is below `memory.wmark_low`. Basically, `memory.wmark_high` - := min(memory.high, memory.limit_in_bytes) * memory.memory.wmark_ratio - `memory.wmark_low` := min(memory.high, memory.limit_in_bytes) - * (memory.wmark_ratio - memory.wmark_scale_factor) WmarkRatio - specifies `memory.wmark_ratio` that help calculate `memory.wmark_high`, - which triggers async memory reclamation when memory - usage exceeds. Close: 0. Recommended: 95.' + description: |- + wmark_ratio (Anolis OS required) + Async memory reclamation is triggered when cgroup memory usage exceeds `memory.wmark_high` and the reclamation + stops when usage is below `memory.wmark_low`. Basically, + `memory.wmark_high` := min(memory.high, memory.limit_in_bytes) * memory.memory.wmark_ratio + `memory.wmark_low` := min(memory.high, memory.limit_in_bytes) * (memory.wmark_ratio - memory.wmark_scale_factor) + WmarkRatio specifies `memory.wmark_ratio` that help calculate `memory.wmark_high`, which triggers async + memory reclamation when memory usage exceeds. + Close: 0. Recommended: 95. format: int64 maximum: 100 minimum: 0 type: integer wmarkScalePermill: - description: 'WmarkScalePermill specifies `memory.wmark_scale_factor` - that helps calculate `memory.wmark_low`, which stops - async memory reclamation when memory usage belows. Close: - 50. Recommended: 20.' + description: |- + WmarkScalePermill specifies `memory.wmark_scale_factor` that helps calculate `memory.wmark_low`, which + stops async memory reclamation when memory usage belows. + Close: 50. Recommended: 20. format: int64 maximum: 1000 minimum: 1 @@ -684,24 +654,22 @@ spec: - type: integer - type: string default: 100 - description: 'EgressLimit describes the maximum network - bandwidth can be used in the egress direction, unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' 
+ description: |- + EgressLimit describes the maximum network bandwidth can be used in the egress direction, + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true egressRequest: anyOf: - type: integer - type: string default: 0 - description: 'EgressRequest describes the minimum network - bandwidth guaranteed in the egress direction. unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + EgressRequest describes the minimum network bandwidth guaranteed in the egress direction. + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true enable: type: boolean @@ -710,24 +678,22 @@ spec: - type: integer - type: string default: 100 - description: 'IngressLimit describes the maximum network - bandwidth can be used in the ingress direction, unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + IngressLimit describes the maximum network bandwidth can be used in the ingress direction, + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true ingressRequest: anyOf: - type: integer - type: string default: 0 - description: 'IngressRequest describes the minimum network - bandwidth guaranteed in the ingress direction. unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + IngressRequest describes the minimum network bandwidth guaranteed in the ingress direction. + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true type: object resctrlQOS: @@ -769,9 +735,9 @@ spec: ioCfg: properties: enableUserModel: - description: 'configure the cost model of blkio-cost - manually whether the user model is enabled. - Default value: false' + description: |- + configure the cost model of blkio-cost manually + whether the user model is enabled. Default value: false type: boolean ioWeightPercent: description: 'This field is used to set the @@ -814,31 +780,25 @@ spec: minimum: 1 type: integer readBPS: - description: Throttling of throughput The value - is set to 0, which indicates that the feature - is disabled. + description: |- + Throttling of throughput + The value is set to 0, which indicates that the feature is disabled. format: int64 minimum: 0 type: integer readIOPS: - description: Throttling of IOPS The value is - set to 0, which indicates that the feature - is disabled. + description: |- + Throttling of IOPS + The value is set to 0, which indicates that the feature is disabled. 
format: int64 minimum: 0 type: integer readLatency: - description: 'Configure the weight-based throttling - feature of blk-iocost Only used for RootClass - After blk-iocost is enabled, the kernel calculates - the proportion of requests that exceed the - read or write latency threshold out of all - requests. When the proportion is greater than - the read or write latency percentile (95%), - the kernel considers the disk to be saturated - and reduces the rate at which requests are - sent to the disk. the read latency threshold. - Unit: microseconds.' + description: |- + Configure the weight-based throttling feature of blk-iocost + Only used for RootClass + After blk-iocost is enabled, the kernel calculates the proportion of requests that exceed the read or write latency threshold out of all requests. When the proportion is greater than the read or write latency percentile (95%), the kernel considers the disk to be saturated and reduces the rate at which requests are sent to the disk. + the read latency threshold. Unit: microseconds. format: int64 type: integer readLatencyPercent: @@ -880,25 +840,26 @@ spec: description: CPUQOSCfg stores node-level config of cpu qos properties: coreExpeller: - description: 'whether pods of the QoS class can expel - the cgroup idle pods at the SMT-level. default = false - If set to true, pods of this QoS will use a dedicated - core sched group for noise clean with the SchedIdle - pods. NOTE: It takes effect if cpuPolicy = "coreSched".' + description: |- + whether pods of the QoS class can expel the cgroup idle pods at the SMT-level. default = false + If set to true, pods of this QoS will use a dedicated core sched group for noise clean with the SchedIdle pods. + NOTE: It takes effect if cpuPolicy = "coreSched". type: boolean enable: description: Enable indicates whether the cpu qos is enabled. type: boolean groupIdentity: - description: 'group identity value for pods, default = - 0 NOTE: It takes effect if cpuPolicy = "groupIdentity".' + description: |- + group identity value for pods, default = 0 + NOTE: It takes effect if cpuPolicy = "groupIdentity". format: int64 type: integer schedIdle: - description: 'cpu.idle value for pods, default = 0. `1` - means using SCHED_IDLE. CGroup Idle (introduced since - mainline Linux 5.15): https://lore.kernel.org/lkml/162971078674.25758.15464079371945307825.tip-bot2@tip-bot2/#r - NOTE: It takes effect if cpuPolicy = "coreSched".' + description: |- + cpu.idle value for pods, default = 0. + `1` means using SCHED_IDLE. + CGroup Idle (introduced since mainline Linux 5.15): https://lore.kernel.org/lkml/162971078674.25758.15464079371945307825.tip-bot2@tip-bot2/#r + NOTE: It takes effect if cpuPolicy = "coreSched". format: int64 type: integer type: object @@ -907,41 +868,33 @@ spec: qos properties: enable: - description: 'Enable indicates whether the memory qos - is enabled (default: false). This field is used for - node-level control, while pod-level configuration is - done with MemoryQOS and `Policy` instead of an `Enable` - option. Please view the differences between MemoryQOSCfg - and PodMemoryQOSConfig structs.' + description: |- + Enable indicates whether the memory qos is enabled (default: false). + This field is used for node-level control, while pod-level configuration is done with MemoryQOS and `Policy` + instead of an `Enable` option. Please view the differences between MemoryQOSCfg and PodMemoryQOSConfig structs. 
type: boolean lowLimitPercent: - description: 'LowLimitPercent specifies the lowLimitFactor - percentage to calculate `memory.low`, which TRIES BEST - protecting memory from global reclamation when memory - usage does not exceed the low limit unless no unprotected - memcg can be reclaimed. NOTE: `memory.low` should be - larger than `memory.min`. If spec.requests.memory == - spec.limits.memory, pod `memory.low` and `memory.high` - become invalid, while `memory.wmark_ratio` is still - in effect. Close: 0.' + description: |- + LowLimitPercent specifies the lowLimitFactor percentage to calculate `memory.low`, which TRIES BEST + protecting memory from global reclamation when memory usage does not exceed the low limit unless no unprotected + memcg can be reclaimed. + NOTE: `memory.low` should be larger than `memory.min`. If spec.requests.memory == spec.limits.memory, + pod `memory.low` and `memory.high` become invalid, while `memory.wmark_ratio` is still in effect. + Close: 0. format: int64 minimum: 0 type: integer minLimitPercent: - description: 'memcg qos If enabled, memcg qos will be - set by the agent, where some fields are implicitly calculated - from pod spec. 1. `memory.min` := spec.requests.memory - * minLimitFactor / 100 (use 0 if requests.memory is - not set) 2. `memory.low` := spec.requests.memory * lowLimitFactor - / 100 (use 0 if requests.memory is not set) 3. `memory.limit_in_bytes` - := spec.limits.memory (set $node.allocatable.memory - if limits.memory is not set) 4. `memory.high` := floor[(spec.requests.memory - + throttlingFactor / 100 * (memory.limit_in_bytes or - node allocatable memory - spec.requests.memory))/pageSize] - * pageSize MinLimitPercent specifies the minLimitFactor - percentage to calculate `memory.min`, which protects - memory from global reclamation when memory usage does - not exceed the min limit. Close: 0.' + description: |- + memcg qos + If enabled, memcg qos will be set by the agent, where some fields are implicitly calculated from pod spec. + 1. `memory.min` := spec.requests.memory * minLimitFactor / 100 (use 0 if requests.memory is not set) + 2. `memory.low` := spec.requests.memory * lowLimitFactor / 100 (use 0 if requests.memory is not set) + 3. `memory.limit_in_bytes` := spec.limits.memory (set $node.allocatable.memory if limits.memory is not set) + 4. `memory.high` := floor[(spec.requests.memory + throttlingFactor / 100 * (memory.limit_in_bytes or node allocatable memory - spec.requests.memory))/pageSize] * pageSize + MinLimitPercent specifies the minLimitFactor percentage to calculate `memory.min`, which protects memory + from global reclamation when memory usage does not exceed the min limit. + Close: 0. format: int64 minimum: 0 type: integer @@ -957,48 +910,46 @@ spec: format: int64 type: integer throttlingPercent: - description: 'ThrottlingPercent specifies the throttlingFactor - percentage to calculate `memory.high` with pod memory.limits - or node allocatable memory, which triggers memcg direct - reclamation when memory usage exceeds. Lower the factor - brings more heavier reclaim pressure. Close: 0.' + description: |- + ThrottlingPercent specifies the throttlingFactor percentage to calculate `memory.high` with pod + memory.limits or node allocatable memory, which triggers memcg direct reclamation when memory usage exceeds. + Lower the factor brings more heavier reclaim pressure. + Close: 0. 
format: int64 minimum: 0 type: integer wmarkMinAdj: - description: 'wmark_min_adj (Anolis OS required) WmarkMinAdj - specifies `memory.wmark_min_adj` which adjusts per-memcg - threshold for global memory reclamation. Lower the factor - brings later reclamation. The adjustment uses different - formula for different value range. [-25, 0):global_wmark_min'' - = global_wmark_min + (global_wmark_min - 0) * wmarkMinAdj - (0, 50]:global_wmark_min'' = global_wmark_min + (global_wmark_low - - global_wmark_min) * wmarkMinAdj Close: [LSR:0, LS:0, - BE:0]. Recommended: [LSR:-25, LS:-25, BE:50].' + description: |- + wmark_min_adj (Anolis OS required) + WmarkMinAdj specifies `memory.wmark_min_adj` which adjusts per-memcg threshold for global memory + reclamation. Lower the factor brings later reclamation. + The adjustment uses different formula for different value range. + [-25, 0):global_wmark_min' = global_wmark_min + (global_wmark_min - 0) * wmarkMinAdj + (0, 50]:global_wmark_min' = global_wmark_min + (global_wmark_low - global_wmark_min) * wmarkMinAdj + Close: [LSR:0, LS:0, BE:0]. Recommended: [LSR:-25, LS:-25, BE:50]. format: int64 maximum: 50 minimum: -25 type: integer wmarkRatio: - description: 'wmark_ratio (Anolis OS required) Async memory - reclamation is triggered when cgroup memory usage exceeds - `memory.wmark_high` and the reclamation stops when usage - is below `memory.wmark_low`. Basically, `memory.wmark_high` - := min(memory.high, memory.limit_in_bytes) * memory.memory.wmark_ratio - `memory.wmark_low` := min(memory.high, memory.limit_in_bytes) - * (memory.wmark_ratio - memory.wmark_scale_factor) WmarkRatio - specifies `memory.wmark_ratio` that help calculate `memory.wmark_high`, - which triggers async memory reclamation when memory - usage exceeds. Close: 0. Recommended: 95.' + description: |- + wmark_ratio (Anolis OS required) + Async memory reclamation is triggered when cgroup memory usage exceeds `memory.wmark_high` and the reclamation + stops when usage is below `memory.wmark_low`. Basically, + `memory.wmark_high` := min(memory.high, memory.limit_in_bytes) * memory.memory.wmark_ratio + `memory.wmark_low` := min(memory.high, memory.limit_in_bytes) * (memory.wmark_ratio - memory.wmark_scale_factor) + WmarkRatio specifies `memory.wmark_ratio` that help calculate `memory.wmark_high`, which triggers async + memory reclamation when memory usage exceeds. + Close: 0. Recommended: 95. format: int64 maximum: 100 minimum: 0 type: integer wmarkScalePermill: - description: 'WmarkScalePermill specifies `memory.wmark_scale_factor` - that helps calculate `memory.wmark_low`, which stops - async memory reclamation when memory usage belows. Close: - 50. Recommended: 20.' + description: |- + WmarkScalePermill specifies `memory.wmark_scale_factor` that helps calculate `memory.wmark_low`, which + stops async memory reclamation when memory usage belows. + Close: 50. Recommended: 20. format: int64 maximum: 1000 minimum: 1 @@ -1011,24 +962,22 @@ spec: - type: integer - type: string default: 100 - description: 'EgressLimit describes the maximum network - bandwidth can be used in the egress direction, unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' 
+ description: |- + EgressLimit describes the maximum network bandwidth can be used in the egress direction, + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true egressRequest: anyOf: - type: integer - type: string default: 0 - description: 'EgressRequest describes the minimum network - bandwidth guaranteed in the egress direction. unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + EgressRequest describes the minimum network bandwidth guaranteed in the egress direction. + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true enable: type: boolean @@ -1037,24 +986,22 @@ spec: - type: integer - type: string default: 100 - description: 'IngressLimit describes the maximum network - bandwidth can be used in the ingress direction, unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + IngressLimit describes the maximum network bandwidth can be used in the ingress direction, + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true ingressRequest: anyOf: - type: integer - type: string default: 0 - description: 'IngressRequest describes the minimum network - bandwidth guaranteed in the ingress direction. unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + IngressRequest describes the minimum network bandwidth guaranteed in the ingress direction. + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true type: object resctrlQOS: @@ -1096,9 +1043,9 @@ spec: ioCfg: properties: enableUserModel: - description: 'configure the cost model of blkio-cost - manually whether the user model is enabled. - Default value: false' + description: |- + configure the cost model of blkio-cost manually + whether the user model is enabled. Default value: false type: boolean ioWeightPercent: description: 'This field is used to set the @@ -1141,31 +1088,25 @@ spec: minimum: 1 type: integer readBPS: - description: Throttling of throughput The value - is set to 0, which indicates that the feature - is disabled. + description: |- + Throttling of throughput + The value is set to 0, which indicates that the feature is disabled. format: int64 minimum: 0 type: integer readIOPS: - description: Throttling of IOPS The value is - set to 0, which indicates that the feature - is disabled. + description: |- + Throttling of IOPS + The value is set to 0, which indicates that the feature is disabled. 
format: int64 minimum: 0 type: integer readLatency: - description: 'Configure the weight-based throttling - feature of blk-iocost Only used for RootClass - After blk-iocost is enabled, the kernel calculates - the proportion of requests that exceed the - read or write latency threshold out of all - requests. When the proportion is greater than - the read or write latency percentile (95%), - the kernel considers the disk to be saturated - and reduces the rate at which requests are - sent to the disk. the read latency threshold. - Unit: microseconds.' + description: |- + Configure the weight-based throttling feature of blk-iocost + Only used for RootClass + After blk-iocost is enabled, the kernel calculates the proportion of requests that exceed the read or write latency threshold out of all requests. When the proportion is greater than the read or write latency percentile (95%), the kernel considers the disk to be saturated and reduces the rate at which requests are sent to the disk. + the read latency threshold. Unit: microseconds. format: int64 type: integer readLatencyPercent: @@ -1207,25 +1148,26 @@ spec: description: CPUQOSCfg stores node-level config of cpu qos properties: coreExpeller: - description: 'whether pods of the QoS class can expel - the cgroup idle pods at the SMT-level. default = false - If set to true, pods of this QoS will use a dedicated - core sched group for noise clean with the SchedIdle - pods. NOTE: It takes effect if cpuPolicy = "coreSched".' + description: |- + whether pods of the QoS class can expel the cgroup idle pods at the SMT-level. default = false + If set to true, pods of this QoS will use a dedicated core sched group for noise clean with the SchedIdle pods. + NOTE: It takes effect if cpuPolicy = "coreSched". type: boolean enable: description: Enable indicates whether the cpu qos is enabled. type: boolean groupIdentity: - description: 'group identity value for pods, default = - 0 NOTE: It takes effect if cpuPolicy = "groupIdentity".' + description: |- + group identity value for pods, default = 0 + NOTE: It takes effect if cpuPolicy = "groupIdentity". format: int64 type: integer schedIdle: - description: 'cpu.idle value for pods, default = 0. `1` - means using SCHED_IDLE. CGroup Idle (introduced since - mainline Linux 5.15): https://lore.kernel.org/lkml/162971078674.25758.15464079371945307825.tip-bot2@tip-bot2/#r - NOTE: It takes effect if cpuPolicy = "coreSched".' + description: |- + cpu.idle value for pods, default = 0. + `1` means using SCHED_IDLE. + CGroup Idle (introduced since mainline Linux 5.15): https://lore.kernel.org/lkml/162971078674.25758.15464079371945307825.tip-bot2@tip-bot2/#r + NOTE: It takes effect if cpuPolicy = "coreSched". format: int64 type: integer type: object @@ -1234,41 +1176,33 @@ spec: qos properties: enable: - description: 'Enable indicates whether the memory qos - is enabled (default: false). This field is used for - node-level control, while pod-level configuration is - done with MemoryQOS and `Policy` instead of an `Enable` - option. Please view the differences between MemoryQOSCfg - and PodMemoryQOSConfig structs.' + description: |- + Enable indicates whether the memory qos is enabled (default: false). + This field is used for node-level control, while pod-level configuration is done with MemoryQOS and `Policy` + instead of an `Enable` option. Please view the differences between MemoryQOSCfg and PodMemoryQOSConfig structs. 
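The CPUQOSCfg block above maps per-QoS policy onto kernel interfaces: groupIdentity applies when cpuPolicy = "groupIdentity", while schedIdle and coreExpeller apply when cpuPolicy = "coreSched", with schedIdle=1 placing the cgroup under SCHED_IDLE through the cgroup v2 `cpu.idle` file (mainline Linux 5.15+, per the link in the description). The sketch below only illustrates writing that value; the cgroup path and helper name are assumptions for illustration, not the koordlet implementation:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
)

// setCPUIdle writes the cgroup v2 `cpu.idle` value (0 or 1) for a cgroup.
// `1` schedules the whole cgroup as SCHED_IDLE, so its tasks only run when
// no non-idle task wants the CPU; `0` restores normal scheduling.
func setCPUIdle(cgroupDir string, idle int64) error {
	if idle != 0 && idle != 1 {
		return fmt.Errorf("cpu.idle must be 0 or 1, got %d", idle)
	}
	f := filepath.Join(cgroupDir, "cpu.idle")
	return os.WriteFile(f, []byte(strconv.FormatInt(idle, 10)), 0644)
}

func main() {
	// Example: apply schedIdle=1 to a hypothetical best-effort cgroup directory.
	err := setCPUIdle("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice", 1)
	if err != nil {
		fmt.Println("apply cpu.idle:", err)
	}
}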
type: boolean lowLimitPercent: - description: 'LowLimitPercent specifies the lowLimitFactor - percentage to calculate `memory.low`, which TRIES BEST - protecting memory from global reclamation when memory - usage does not exceed the low limit unless no unprotected - memcg can be reclaimed. NOTE: `memory.low` should be - larger than `memory.min`. If spec.requests.memory == - spec.limits.memory, pod `memory.low` and `memory.high` - become invalid, while `memory.wmark_ratio` is still - in effect. Close: 0.' + description: |- + LowLimitPercent specifies the lowLimitFactor percentage to calculate `memory.low`, which TRIES BEST + protecting memory from global reclamation when memory usage does not exceed the low limit unless no unprotected + memcg can be reclaimed. + NOTE: `memory.low` should be larger than `memory.min`. If spec.requests.memory == spec.limits.memory, + pod `memory.low` and `memory.high` become invalid, while `memory.wmark_ratio` is still in effect. + Close: 0. format: int64 minimum: 0 type: integer minLimitPercent: - description: 'memcg qos If enabled, memcg qos will be - set by the agent, where some fields are implicitly calculated - from pod spec. 1. `memory.min` := spec.requests.memory - * minLimitFactor / 100 (use 0 if requests.memory is - not set) 2. `memory.low` := spec.requests.memory * lowLimitFactor - / 100 (use 0 if requests.memory is not set) 3. `memory.limit_in_bytes` - := spec.limits.memory (set $node.allocatable.memory - if limits.memory is not set) 4. `memory.high` := floor[(spec.requests.memory - + throttlingFactor / 100 * (memory.limit_in_bytes or - node allocatable memory - spec.requests.memory))/pageSize] - * pageSize MinLimitPercent specifies the minLimitFactor - percentage to calculate `memory.min`, which protects - memory from global reclamation when memory usage does - not exceed the min limit. Close: 0.' + description: |- + memcg qos + If enabled, memcg qos will be set by the agent, where some fields are implicitly calculated from pod spec. + 1. `memory.min` := spec.requests.memory * minLimitFactor / 100 (use 0 if requests.memory is not set) + 2. `memory.low` := spec.requests.memory * lowLimitFactor / 100 (use 0 if requests.memory is not set) + 3. `memory.limit_in_bytes` := spec.limits.memory (set $node.allocatable.memory if limits.memory is not set) + 4. `memory.high` := floor[(spec.requests.memory + throttlingFactor / 100 * (memory.limit_in_bytes or node allocatable memory - spec.requests.memory))/pageSize] * pageSize + MinLimitPercent specifies the minLimitFactor percentage to calculate `memory.min`, which protects memory + from global reclamation when memory usage does not exceed the min limit. + Close: 0. format: int64 minimum: 0 type: integer @@ -1284,48 +1218,46 @@ spec: format: int64 type: integer throttlingPercent: - description: 'ThrottlingPercent specifies the throttlingFactor - percentage to calculate `memory.high` with pod memory.limits - or node allocatable memory, which triggers memcg direct - reclamation when memory usage exceeds. Lower the factor - brings more heavier reclaim pressure. Close: 0.' + description: |- + ThrottlingPercent specifies the throttlingFactor percentage to calculate `memory.high` with pod + memory.limits or node allocatable memory, which triggers memcg direct reclamation when memory usage exceeds. + Lower the factor brings more heavier reclaim pressure. + Close: 0. 
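The four memcg formulas listed in the minLimitPercent description above translate directly into arithmetic on the pod spec. A short sketch under stated assumptions (a 4096-byte page size, requests/limits already resolved to bytes, node allocatable used when limits.memory is unset); the function is illustrative only:

package main

import "fmt"

const pageSize = 4096 // assumed page size for the floor-to-page rounding of memory.high

// memcgValues mirrors the formulas in the CRD description above.
// requests and limits are the pod's memory request/limit in bytes (0 means "not set");
// nodeAllocatable is the node's allocatable memory in bytes.
func memcgValues(requests, limits, nodeAllocatable int64,
	minLimitPercent, lowLimitPercent, throttlingPercent int64) (min, low, limit, high int64) {
	min = requests * minLimitPercent / 100 // memory.min
	low = requests * lowLimitPercent / 100 // memory.low
	limit = limits                         // memory.limit_in_bytes
	if limit <= 0 {
		limit = nodeAllocatable
	}
	// memory.high := floor[(requests + throttlingPercent/100 * (limit - requests)) / pageSize] * pageSize
	high = (requests + throttlingPercent*(limit-requests)/100) / pageSize * pageSize
	return
}

func main() {
	// Example: 4 GiB request, 8 GiB limit, throttlingPercent=80, min/low closed (0).
	min, low, limit, high := memcgValues(4<<30, 8<<30, 256<<30, 0, 0, 80)
	fmt.Println(min, low, limit, high)
}

Note that when spec.requests.memory equals spec.limits.memory, the description above states that the computed memory.low and memory.high become invalid, so only the watermark settings remain in effect.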
format: int64 minimum: 0 type: integer wmarkMinAdj: - description: 'wmark_min_adj (Anolis OS required) WmarkMinAdj - specifies `memory.wmark_min_adj` which adjusts per-memcg - threshold for global memory reclamation. Lower the factor - brings later reclamation. The adjustment uses different - formula for different value range. [-25, 0):global_wmark_min'' - = global_wmark_min + (global_wmark_min - 0) * wmarkMinAdj - (0, 50]:global_wmark_min'' = global_wmark_min + (global_wmark_low - - global_wmark_min) * wmarkMinAdj Close: [LSR:0, LS:0, - BE:0]. Recommended: [LSR:-25, LS:-25, BE:50].' + description: |- + wmark_min_adj (Anolis OS required) + WmarkMinAdj specifies `memory.wmark_min_adj` which adjusts per-memcg threshold for global memory + reclamation. Lower the factor brings later reclamation. + The adjustment uses different formula for different value range. + [-25, 0):global_wmark_min' = global_wmark_min + (global_wmark_min - 0) * wmarkMinAdj + (0, 50]:global_wmark_min' = global_wmark_min + (global_wmark_low - global_wmark_min) * wmarkMinAdj + Close: [LSR:0, LS:0, BE:0]. Recommended: [LSR:-25, LS:-25, BE:50]. format: int64 maximum: 50 minimum: -25 type: integer wmarkRatio: - description: 'wmark_ratio (Anolis OS required) Async memory - reclamation is triggered when cgroup memory usage exceeds - `memory.wmark_high` and the reclamation stops when usage - is below `memory.wmark_low`. Basically, `memory.wmark_high` - := min(memory.high, memory.limit_in_bytes) * memory.memory.wmark_ratio - `memory.wmark_low` := min(memory.high, memory.limit_in_bytes) - * (memory.wmark_ratio - memory.wmark_scale_factor) WmarkRatio - specifies `memory.wmark_ratio` that help calculate `memory.wmark_high`, - which triggers async memory reclamation when memory - usage exceeds. Close: 0. Recommended: 95.' + description: |- + wmark_ratio (Anolis OS required) + Async memory reclamation is triggered when cgroup memory usage exceeds `memory.wmark_high` and the reclamation + stops when usage is below `memory.wmark_low`. Basically, + `memory.wmark_high` := min(memory.high, memory.limit_in_bytes) * memory.memory.wmark_ratio + `memory.wmark_low` := min(memory.high, memory.limit_in_bytes) * (memory.wmark_ratio - memory.wmark_scale_factor) + WmarkRatio specifies `memory.wmark_ratio` that help calculate `memory.wmark_high`, which triggers async + memory reclamation when memory usage exceeds. + Close: 0. Recommended: 95. format: int64 maximum: 100 minimum: 0 type: integer wmarkScalePermill: - description: 'WmarkScalePermill specifies `memory.wmark_scale_factor` - that helps calculate `memory.wmark_low`, which stops - async memory reclamation when memory usage belows. Close: - 50. Recommended: 20.' + description: |- + WmarkScalePermill specifies `memory.wmark_scale_factor` that helps calculate `memory.wmark_low`, which + stops async memory reclamation when memory usage belows. + Close: 50. Recommended: 20. format: int64 maximum: 1000 minimum: 1 @@ -1338,24 +1270,22 @@ spec: - type: integer - type: string default: 100 - description: 'EgressLimit describes the maximum network - bandwidth can be used in the egress direction, unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' 
+ description: |- + EgressLimit describes the maximum network bandwidth can be used in the egress direction, + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true egressRequest: anyOf: - type: integer - type: string default: 0 - description: 'EgressRequest describes the minimum network - bandwidth guaranteed in the egress direction. unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + EgressRequest describes the minimum network bandwidth guaranteed in the egress direction. + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true enable: type: boolean @@ -1364,24 +1294,22 @@ spec: - type: integer - type: string default: 100 - description: 'IngressLimit describes the maximum network - bandwidth can be used in the ingress direction, unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + IngressLimit describes the maximum network bandwidth can be used in the ingress direction, + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true ingressRequest: anyOf: - type: integer - type: string default: 0 - description: 'IngressRequest describes the minimum network - bandwidth guaranteed in the ingress direction. unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + IngressRequest describes the minimum network bandwidth guaranteed in the ingress direction. + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true type: object resctrlQOS: @@ -1433,9 +1361,9 @@ spec: ioCfg: properties: enableUserModel: - description: 'configure the cost model of blkio-cost - manually whether the user model is enabled. - Default value: false' + description: |- + configure the cost model of blkio-cost manually + whether the user model is enabled. Default value: false type: boolean ioWeightPercent: description: 'This field is used to set the @@ -1478,31 +1406,25 @@ spec: minimum: 1 type: integer readBPS: - description: Throttling of throughput The value - is set to 0, which indicates that the feature - is disabled. + description: |- + Throttling of throughput + The value is set to 0, which indicates that the feature is disabled. format: int64 minimum: 0 type: integer readIOPS: - description: Throttling of IOPS The value is - set to 0, which indicates that the feature - is disabled. + description: |- + Throttling of IOPS + The value is set to 0, which indicates that the feature is disabled. 
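For the readBPS/readIOPS fields above (and their write counterparts), 0 means the throttle is disabled and a positive value caps throughput or IOPS for the device. On cgroup v1, such limits are conventionally written as "<major>:<minor> <value>" to files like blkio.throttle.read_bps_device; the sketch below illustrates that convention and is an assumption about the mechanism, not a quote of the koordlet code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// setReadBPS throttles read throughput for one block device under cgroup v1 blkio.
// A value of 0 is treated as "feature disabled", mirroring the CRD semantics,
// and simply skips the write. major/minor identify the device (e.g. 253:16).
func setReadBPS(blkioCgroupDir string, major, minor uint32, bps int64) error {
	if bps == 0 {
		return nil // 0 => throttling disabled, leave kernel defaults in place
	}
	f := filepath.Join(blkioCgroupDir, "blkio.throttle.read_bps_device")
	entry := fmt.Sprintf("%d:%d %d", major, minor, bps)
	return os.WriteFile(f, []byte(entry), 0644)
}

func main() {
	// Example: cap reads at 100 MiB/s for device 253:16 in a hypothetical cgroup.
	if err := setReadBPS("/sys/fs/cgroup/blkio/kubepods.slice", 253, 16, 100<<20); err != nil {
		fmt.Println("apply blkio throttle:", err)
	}
}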
format: int64 minimum: 0 type: integer readLatency: - description: 'Configure the weight-based throttling - feature of blk-iocost Only used for RootClass - After blk-iocost is enabled, the kernel calculates - the proportion of requests that exceed the - read or write latency threshold out of all - requests. When the proportion is greater than - the read or write latency percentile (95%), - the kernel considers the disk to be saturated - and reduces the rate at which requests are - sent to the disk. the read latency threshold. - Unit: microseconds.' + description: |- + Configure the weight-based throttling feature of blk-iocost + Only used for RootClass + After blk-iocost is enabled, the kernel calculates the proportion of requests that exceed the read or write latency threshold out of all requests. When the proportion is greater than the read or write latency percentile (95%), the kernel considers the disk to be saturated and reduces the rate at which requests are sent to the disk. + the read latency threshold. Unit: microseconds. format: int64 type: integer readLatencyPercent: @@ -1544,25 +1466,26 @@ spec: description: CPUQOSCfg stores node-level config of cpu qos properties: coreExpeller: - description: 'whether pods of the QoS class can expel - the cgroup idle pods at the SMT-level. default = false - If set to true, pods of this QoS will use a dedicated - core sched group for noise clean with the SchedIdle - pods. NOTE: It takes effect if cpuPolicy = "coreSched".' + description: |- + whether pods of the QoS class can expel the cgroup idle pods at the SMT-level. default = false + If set to true, pods of this QoS will use a dedicated core sched group for noise clean with the SchedIdle pods. + NOTE: It takes effect if cpuPolicy = "coreSched". type: boolean enable: description: Enable indicates whether the cpu qos is enabled. type: boolean groupIdentity: - description: 'group identity value for pods, default = - 0 NOTE: It takes effect if cpuPolicy = "groupIdentity".' + description: |- + group identity value for pods, default = 0 + NOTE: It takes effect if cpuPolicy = "groupIdentity". format: int64 type: integer schedIdle: - description: 'cpu.idle value for pods, default = 0. `1` - means using SCHED_IDLE. CGroup Idle (introduced since - mainline Linux 5.15): https://lore.kernel.org/lkml/162971078674.25758.15464079371945307825.tip-bot2@tip-bot2/#r - NOTE: It takes effect if cpuPolicy = "coreSched".' + description: |- + cpu.idle value for pods, default = 0. + `1` means using SCHED_IDLE. + CGroup Idle (introduced since mainline Linux 5.15): https://lore.kernel.org/lkml/162971078674.25758.15464079371945307825.tip-bot2@tip-bot2/#r + NOTE: It takes effect if cpuPolicy = "coreSched". format: int64 type: integer type: object @@ -1571,41 +1494,33 @@ spec: qos properties: enable: - description: 'Enable indicates whether the memory qos - is enabled (default: false). This field is used for - node-level control, while pod-level configuration is - done with MemoryQOS and `Policy` instead of an `Enable` - option. Please view the differences between MemoryQOSCfg - and PodMemoryQOSConfig structs.' + description: |- + Enable indicates whether the memory qos is enabled (default: false). + This field is used for node-level control, while pod-level configuration is done with MemoryQOS and `Policy` + instead of an `Enable` option. Please view the differences between MemoryQOSCfg and PodMemoryQOSConfig structs. 
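The readLatency/readLatencyPercent pair above configures blk-iocost's latency-based saturation check. One way to read the rule described there is: take the read latency at the configured percentile (e.g. 95%) and treat the disk as saturated when it exceeds the readLatency threshold, at which point the kernel slows request dispatch. A small, purely illustrative sketch of that reading:

package main

import (
	"fmt"
	"sort"
)

// saturatedByReadLatency is one reading of the blk-iocost rule described above:
// compute the read latency at the configured percentile (readLatencyPercent,
// e.g. 95) and report saturation when it exceeds the readLatency threshold
// (microseconds). latenciesUs are observed request latencies in microseconds.
func saturatedByReadLatency(latenciesUs []int64, thresholdUs int64, percent int) bool {
	if len(latenciesUs) == 0 || percent <= 0 || percent > 100 {
		return false
	}
	sorted := append([]int64(nil), latenciesUs...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	idx := (len(sorted)*percent + 99) / 100 // 1-based index of the percentile sample, rounded up
	return sorted[idx-1] > thresholdUs
}

func main() {
	samples := []int64{400, 500, 700, 800, 1200, 2500, 2800, 3200, 9000, 30000}
	// p95 of these 10 samples falls on the last value (30000us) > 3000us threshold.
	fmt.Println(saturatedByReadLatency(samples, 3000, 95)) // true
}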
type: boolean lowLimitPercent: - description: 'LowLimitPercent specifies the lowLimitFactor - percentage to calculate `memory.low`, which TRIES BEST - protecting memory from global reclamation when memory - usage does not exceed the low limit unless no unprotected - memcg can be reclaimed. NOTE: `memory.low` should be - larger than `memory.min`. If spec.requests.memory == - spec.limits.memory, pod `memory.low` and `memory.high` - become invalid, while `memory.wmark_ratio` is still - in effect. Close: 0.' + description: |- + LowLimitPercent specifies the lowLimitFactor percentage to calculate `memory.low`, which TRIES BEST + protecting memory from global reclamation when memory usage does not exceed the low limit unless no unprotected + memcg can be reclaimed. + NOTE: `memory.low` should be larger than `memory.min`. If spec.requests.memory == spec.limits.memory, + pod `memory.low` and `memory.high` become invalid, while `memory.wmark_ratio` is still in effect. + Close: 0. format: int64 minimum: 0 type: integer minLimitPercent: - description: 'memcg qos If enabled, memcg qos will be - set by the agent, where some fields are implicitly calculated - from pod spec. 1. `memory.min` := spec.requests.memory - * minLimitFactor / 100 (use 0 if requests.memory is - not set) 2. `memory.low` := spec.requests.memory * lowLimitFactor - / 100 (use 0 if requests.memory is not set) 3. `memory.limit_in_bytes` - := spec.limits.memory (set $node.allocatable.memory - if limits.memory is not set) 4. `memory.high` := floor[(spec.requests.memory - + throttlingFactor / 100 * (memory.limit_in_bytes or - node allocatable memory - spec.requests.memory))/pageSize] - * pageSize MinLimitPercent specifies the minLimitFactor - percentage to calculate `memory.min`, which protects - memory from global reclamation when memory usage does - not exceed the min limit. Close: 0.' + description: |- + memcg qos + If enabled, memcg qos will be set by the agent, where some fields are implicitly calculated from pod spec. + 1. `memory.min` := spec.requests.memory * minLimitFactor / 100 (use 0 if requests.memory is not set) + 2. `memory.low` := spec.requests.memory * lowLimitFactor / 100 (use 0 if requests.memory is not set) + 3. `memory.limit_in_bytes` := spec.limits.memory (set $node.allocatable.memory if limits.memory is not set) + 4. `memory.high` := floor[(spec.requests.memory + throttlingFactor / 100 * (memory.limit_in_bytes or node allocatable memory - spec.requests.memory))/pageSize] * pageSize + MinLimitPercent specifies the minLimitFactor percentage to calculate `memory.min`, which protects memory + from global reclamation when memory usage does not exceed the min limit. + Close: 0. format: int64 minimum: 0 type: integer @@ -1621,48 +1536,46 @@ spec: format: int64 type: integer throttlingPercent: - description: 'ThrottlingPercent specifies the throttlingFactor - percentage to calculate `memory.high` with pod memory.limits - or node allocatable memory, which triggers memcg direct - reclamation when memory usage exceeds. Lower the factor - brings more heavier reclaim pressure. Close: 0.' + description: |- + ThrottlingPercent specifies the throttlingFactor percentage to calculate `memory.high` with pod + memory.limits or node allocatable memory, which triggers memcg direct reclamation when memory usage exceeds. + Lower the factor brings more heavier reclaim pressure. + Close: 0. 
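The wmarkRatio and wmarkScalePermill fields (described here and in the earlier occurrences of this schema) drive async reclaim between two per-memcg watermarks derived from min(memory.high, memory.limit_in_bytes). A sketch of that arithmetic, treating wmarkRatio as a percentage (0-100) and wmarkScalePermill as a permill value (1-1000) per the field bounds; this unit handling is an assumption drawn from those bounds, not a quote of the agent code:

package main

import "fmt"

// memcgWatermarks sketches the watermark formulas from the wmarkRatio /
// wmarkScalePermill descriptions above. base is min(memory.high,
// memory.limit_in_bytes) in bytes. Async reclaim starts once usage rises
// above wmarkHigh and stops once it falls below wmarkLow.
func memcgWatermarks(base, wmarkRatio, wmarkScalePermill int64) (wmarkHigh, wmarkLow int64) {
	wmarkHigh = base * wmarkRatio / 100
	wmarkLow = wmarkHigh - base*wmarkScalePermill/1000
	return
}

func main() {
	// Example with the recommended values from the descriptions
	// (wmarkRatio=95, wmarkScalePermill=20) for a memcg limited to 8 GiB.
	high, low := memcgWatermarks(8<<30, 95, 20)
	fmt.Println(high, low) // 95% and 93% of 8 GiB
}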
format: int64 minimum: 0 type: integer wmarkMinAdj: - description: 'wmark_min_adj (Anolis OS required) WmarkMinAdj - specifies `memory.wmark_min_adj` which adjusts per-memcg - threshold for global memory reclamation. Lower the factor - brings later reclamation. The adjustment uses different - formula for different value range. [-25, 0):global_wmark_min'' - = global_wmark_min + (global_wmark_min - 0) * wmarkMinAdj - (0, 50]:global_wmark_min'' = global_wmark_min + (global_wmark_low - - global_wmark_min) * wmarkMinAdj Close: [LSR:0, LS:0, - BE:0]. Recommended: [LSR:-25, LS:-25, BE:50].' + description: |- + wmark_min_adj (Anolis OS required) + WmarkMinAdj specifies `memory.wmark_min_adj` which adjusts per-memcg threshold for global memory + reclamation. Lower the factor brings later reclamation. + The adjustment uses different formula for different value range. + [-25, 0):global_wmark_min' = global_wmark_min + (global_wmark_min - 0) * wmarkMinAdj + (0, 50]:global_wmark_min' = global_wmark_min + (global_wmark_low - global_wmark_min) * wmarkMinAdj + Close: [LSR:0, LS:0, BE:0]. Recommended: [LSR:-25, LS:-25, BE:50]. format: int64 maximum: 50 minimum: -25 type: integer wmarkRatio: - description: 'wmark_ratio (Anolis OS required) Async memory - reclamation is triggered when cgroup memory usage exceeds - `memory.wmark_high` and the reclamation stops when usage - is below `memory.wmark_low`. Basically, `memory.wmark_high` - := min(memory.high, memory.limit_in_bytes) * memory.memory.wmark_ratio - `memory.wmark_low` := min(memory.high, memory.limit_in_bytes) - * (memory.wmark_ratio - memory.wmark_scale_factor) WmarkRatio - specifies `memory.wmark_ratio` that help calculate `memory.wmark_high`, - which triggers async memory reclamation when memory - usage exceeds. Close: 0. Recommended: 95.' + description: |- + wmark_ratio (Anolis OS required) + Async memory reclamation is triggered when cgroup memory usage exceeds `memory.wmark_high` and the reclamation + stops when usage is below `memory.wmark_low`. Basically, + `memory.wmark_high` := min(memory.high, memory.limit_in_bytes) * memory.memory.wmark_ratio + `memory.wmark_low` := min(memory.high, memory.limit_in_bytes) * (memory.wmark_ratio - memory.wmark_scale_factor) + WmarkRatio specifies `memory.wmark_ratio` that help calculate `memory.wmark_high`, which triggers async + memory reclamation when memory usage exceeds. + Close: 0. Recommended: 95. format: int64 maximum: 100 minimum: 0 type: integer wmarkScalePermill: - description: 'WmarkScalePermill specifies `memory.wmark_scale_factor` - that helps calculate `memory.wmark_low`, which stops - async memory reclamation when memory usage belows. Close: - 50. Recommended: 20.' + description: |- + WmarkScalePermill specifies `memory.wmark_scale_factor` that helps calculate `memory.wmark_low`, which + stops async memory reclamation when memory usage belows. + Close: 50. Recommended: 20. format: int64 maximum: 1000 minimum: 1 @@ -1675,24 +1588,22 @@ spec: - type: integer - type: string default: 100 - description: 'EgressLimit describes the maximum network - bandwidth can be used in the egress direction, unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' 
+ description: |- + EgressLimit describes the maximum network bandwidth can be used in the egress direction, + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true egressRequest: anyOf: - type: integer - type: string default: 0 - description: 'EgressRequest describes the minimum network - bandwidth guaranteed in the egress direction. unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + EgressRequest describes the minimum network bandwidth guaranteed in the egress direction. + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true enable: type: boolean @@ -1701,24 +1612,22 @@ spec: - type: integer - type: string default: 100 - description: 'IngressLimit describes the maximum network - bandwidth can be used in the ingress direction, unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + IngressLimit describes the maximum network bandwidth can be used in the ingress direction, + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true ingressRequest: anyOf: - type: integer - type: string default: 0 - description: 'IngressRequest describes the minimum network - bandwidth guaranteed in the ingress direction. unit: - bps(bytes per second), two expressions are supported,int - and string, int: percentage based on total bandwidth,valid - in 0-100 string: a specific network bandwidth value, - eg: 50M.' + description: |- + IngressRequest describes the minimum network bandwidth guaranteed in the ingress direction. + unit: bps(bytes per second), two expressions are supported,int and string, + int: percentage based on total bandwidth,valid in 0-100 + string: a specific network bandwidth value, eg: 50M. x-kubernetes-int-or-string: true type: object resctrlQOS: @@ -1754,16 +1663,16 @@ spec: description: BE pods will be limited if node resource usage overload properties: cpuEvictBESatisfactionLowerPercent: - description: be.satisfactionRate = be.CPURealLimit/be.CPURequest; - be.cpuUsage = be.CPUUsed/be.CPURealLimit if be.satisfactionRate - < CPUEvictBESatisfactionLowerPercent/100 && be.usage >= CPUEvictBEUsageThresholdPercent/100, + description: |- + be.satisfactionRate = be.CPURealLimit/be.CPURequest; be.cpuUsage = be.CPUUsed/be.CPURealLimit + if be.satisfactionRate < CPUEvictBESatisfactionLowerPercent/100 && be.usage >= CPUEvictBEUsageThresholdPercent/100, then start to evict pod, and will evict to ${CPUEvictBESatisfactionUpperPercent} format: int64 type: integer cpuEvictBESatisfactionUpperPercent: - description: be.satisfactionRate = be.CPURealLimit/be.CPURequest - if be.satisfactionRate > CPUEvictBESatisfactionUpperPercent/100, - then stop to evict. 
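The cpuEvictBESatisfaction*Percent fields around this hunk encode a hysteresis band for best-effort CPU eviction: eviction starts when be.CPURealLimit/be.CPURequest drops below the lower percentage while BE usage (be.CPUUsed/be.CPURealLimit) is at or above cpuEvictBEUsageThresholdPercent, and stops once the satisfaction rate recovers above the upper percentage. A minimal sketch of that decision, with the struct, helper names, and example thresholds being illustrative rather than taken from the koordlet:

package main

import "fmt"

// beCPUEvictState mirrors the quantities named in the descriptions above.
type beCPUEvictState struct {
	cpuRequest   float64 // sum of BE pods' CPU requests (cores)
	cpuRealLimit float64 // CPU actually allowed for BE pods (cores)
	cpuUsed      float64 // CPU actually used by BE pods (cores)
}

// shouldStartEvict: satisfactionRate < lowerPercent/100 && usage >= usageThresholdPercent/100.
func shouldStartEvict(s beCPUEvictState, lowerPercent, usageThresholdPercent int64) bool {
	satisfaction := s.cpuRealLimit / s.cpuRequest
	usage := s.cpuUsed / s.cpuRealLimit
	return satisfaction < float64(lowerPercent)/100 && usage >= float64(usageThresholdPercent)/100
}

// shouldStopEvict: satisfactionRate > upperPercent/100.
func shouldStopEvict(s beCPUEvictState, upperPercent int64) bool {
	return s.cpuRealLimit/s.cpuRequest > float64(upperPercent)/100
}

func main() {
	s := beCPUEvictState{cpuRequest: 16, cpuRealLimit: 4, cpuUsed: 3.8}
	// Illustrative thresholds: lower=60, upper=80, usage threshold=90.
	fmt.Println(shouldStartEvict(s, 60, 90), shouldStopEvict(s, 80)) // true false
}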
+ description: |- + be.satisfactionRate = be.CPURealLimit/be.CPURequest + if be.satisfactionRate > CPUEvictBESatisfactionUpperPercent/100, then stop to evict. format: int64 type: integer cpuEvictBEUsageThresholdPercent: @@ -1772,13 +1681,14 @@ spec: format: int64 type: integer cpuEvictPolicy: - description: 'CPUEvictPolicy defines the policy for the BECPUEvict - feature. Default: `evictByRealLimit`.' + description: |- + CPUEvictPolicy defines the policy for the BECPUEvict feature. + Default: `evictByRealLimit`. type: string cpuEvictTimeWindowSeconds: - description: when avg(cpuusage) > CPUEvictThresholdPercent, will - start to evict pod by cpu, and avg(cpuusage) is calculated based - on the most recent CPUEvictTimeWindowSeconds data + description: |- + when avg(cpuusage) > CPUEvictThresholdPercent, will start to evict pod by cpu, + and avg(cpuusage) is calculated based on the most recent CPUEvictTimeWindowSeconds data format: int64 type: integer cpuSuppressPolicy: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index fad286732..b08f13e5e 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index d67194488..a454cdcf8 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -2,7 +2,6 @@ apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: - creationTimestamp: null name: mutating-webhook-configuration webhooks: - admissionReviewVersions: @@ -90,7 +89,6 @@ webhooks: apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration metadata: - creationTimestamp: null name: validating-webhook-configuration webhooks: - admissionReviewVersions: diff --git a/docker/koord-descheduler.dockerfile b/docker/koord-descheduler.dockerfile index 080e4774c..6b8ba522b 100644 --- a/docker/koord-descheduler.dockerfile +++ b/docker/koord-descheduler.dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$TARGETPLATFORM golang:1.18 as builder +FROM --platform=$TARGETPLATFORM golang:1.20 as builder WORKDIR /go/src/github.com/koordinator-sh/koordinator ARG VERSION diff --git a/docker/koord-manager.dockerfile b/docker/koord-manager.dockerfile index 0684a22f1..eaba20983 100644 --- a/docker/koord-manager.dockerfile +++ b/docker/koord-manager.dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$TARGETPLATFORM golang:1.18 as builder +FROM --platform=$TARGETPLATFORM golang:1.20 as builder WORKDIR /go/src/github.com/koordinator-sh/koordinator ARG VERSION diff --git a/docker/koord-runtimeproxy.dockerfile b/docker/koord-runtimeproxy.dockerfile index 22df07e53..a4dc07db1 100644 --- a/docker/koord-runtimeproxy.dockerfile +++ b/docker/koord-runtimeproxy.dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$TARGETPLATFORM golang:1.18 as builder +FROM --platform=$TARGETPLATFORM golang:1.20 as builder WORKDIR /go/src/github.com/koordinator-sh/koordinator ARG VERSION diff --git a/docker/koord-scheduler.dockerfile b/docker/koord-scheduler.dockerfile index 4fa591ba4..4d4f47588 100644 --- a/docker/koord-scheduler.dockerfile +++ b/docker/koord-scheduler.dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$TARGETPLATFORM golang:1.18 as builder +FROM --platform=$TARGETPLATFORM golang:1.20 as builder WORKDIR /go/src/github.com/koordinator-sh/koordinator ARG VERSION diff --git a/docker/koordlet.dockerfile b/docker/koordlet.dockerfile index 
21ab637ca..682198f4a 100644 --- a/docker/koordlet.dockerfile +++ b/docker/koordlet.dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$TARGETPLATFORM golang:1.18 as builder +FROM --platform=$TARGETPLATFORM golang:1.20 as builder WORKDIR /go/src/github.com/koordinator-sh/koordinator ARG VERSION @@ -33,7 +33,7 @@ RUN go build -a -o koordlet cmd/koordlet/main.go # For more details about how those images got built, you might wanna check the original Dockerfile in # https://gitlab.com/nvidia/container-images/cuda/-/tree/master/dist. -FROM --platform=$TARGETPLATFORM nvidia/cuda:11.6.2-base-ubuntu20.04 +FROM --platform=$TARGETPLATFORM nvidia/cuda:11.8.0-base-ubuntu22.04 WORKDIR / RUN apt-get update && apt-get install -y lvm2 && rm -rf /var/lib/apt/lists/* COPY --from=builder /go/src/github.com/koordinator-sh/koordinator/koordlet . diff --git a/go.mod b/go.mod index 23666881e..a568f21d8 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/koordinator-sh/koordinator -go 1.18 +go 1.20 require ( github.com/NVIDIA/go-nvml v0.11.6-0.0.20220823120812-7e2082095e82 @@ -9,10 +9,10 @@ require ( github.com/docker/docker v20.10.21+incompatible github.com/evanphx/json-patch v5.6.0+incompatible github.com/fsnotify/fsnotify v1.6.0 - github.com/gin-gonic/gin v1.8.1 - github.com/go-playground/locales v0.14.0 - github.com/go-playground/universal-translator v0.18.0 - github.com/go-playground/validator/v10 v10.10.0 + github.com/gin-gonic/gin v1.9.0 + github.com/go-playground/locales v0.14.1 + github.com/go-playground/universal-translator v0.18.1 + github.com/go-playground/validator/v10 v10.11.2 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 @@ -23,63 +23,84 @@ require ( github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb github.com/mwitkow/grpc-proxy v0.0.0-20230212185441-f345521cb9c9 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.24.0 - github.com/opencontainers/runc v1.1.6 - github.com/openkruise/kruise-api v1.3.0 + github.com/onsi/gomega v1.27.10 + github.com/opencontainers/runc v1.1.7 + github.com/openkruise/kruise-api v1.5.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/prashantv/gostub v1.1.0 - github.com/prometheus/client_golang v1.14.0 - github.com/prometheus/prometheus v0.37.0 - github.com/spf13/cobra v1.6.1 + github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/prometheus v0.0.0-00010101000000-000000000000 + github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.2 + github.com/stretchr/testify v1.8.3 go.uber.org/atomic v1.11.0 - go.uber.org/multierr v1.6.0 - golang.org/x/crypto v0.14.0 - golang.org/x/net v0.16.0 - golang.org/x/sys v0.13.0 - golang.org/x/time v0.0.0-20220920022843-2ce7c2934d45 - google.golang.org/grpc v1.51.0 - google.golang.org/protobuf v1.28.1 + go.uber.org/multierr v1.11.0 + golang.org/x/crypto v0.16.0 + golang.org/x/net v0.19.0 + golang.org/x/sys v0.15.0 + golang.org/x/time v0.3.0 + google.golang.org/grpc v1.56.3 + google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.26.0 - k8s.io/apimachinery v0.26.0 - k8s.io/apiserver v0.26.0 - k8s.io/client-go v0.26.0 - k8s.io/code-generator v0.26.0 - k8s.io/component-base v0.26.0 - k8s.io/component-helpers v0.26.0 - k8s.io/cri-api v0.25.3 - k8s.io/klog/v2 v2.80.1 - k8s.io/kube-scheduler v0.22.6 - k8s.io/kubectl v0.22.6 - k8s.io/kubelet v0.22.6 - k8s.io/kubernetes v1.24.15 - k8s.io/metrics v0.24.15 - k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 - 
sigs.k8s.io/controller-runtime v0.12.3 + k8s.io/api v0.28.7 + k8s.io/apimachinery v0.28.7 + k8s.io/apiserver v0.28.7 + k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible + k8s.io/code-generator v0.28.7 + k8s.io/component-base v0.28.7 + k8s.io/component-helpers v0.28.7 + k8s.io/cri-api v0.28.7 + k8s.io/klog/v2 v2.100.1 + k8s.io/kube-scheduler v0.28.7 + k8s.io/kubectl v0.28.7 + k8s.io/kubelet v0.28.7 + k8s.io/kubernetes v1.28.7 + k8s.io/utils v0.0.0-20240102154912-e7106e64919e + sigs.k8s.io/controller-runtime v0.16.5 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20231005234617-5771399a8ce5 sigs.k8s.io/descheduler v0.26.0 - sigs.k8s.io/scheduler-plugins v0.22.6 sigs.k8s.io/yaml v1.3.0 ) require ( - cloud.google.com/go/compute v1.7.0 // indirect - github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/containerd/containerd v1.6.9 // indirect + github.com/containerd/ttrpc v1.2.2 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/cel-go v0.16.1 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + k8s.io/controller-manager v0.28.7 // indirect + k8s.io/dynamic-resource-allocation v0.28.7 // indirect + k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect + k8s.io/kms v0.28.7 // indirect +) + +require ( + cloud.google.com/go/compute v1.19.1 // indirect + github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.28 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect + github.com/Azure/go-autorest/autorest v0.11.29 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3 // indirect + github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b // indirect github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/Microsoft/go-winio v0.6.0 // indirect github.com/Microsoft/hcsshim v0.9.4 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect @@ -88,71 +109,62 @@ require ( github.com/aws/aws-sdk-go v1.44.102 // indirect 
github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/bytedance/sonic v1.8.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/checkpoint-restore/go-criu/v5 v5.3.0 // indirect - github.com/cilium/ebpf v0.7.0 // indirect - github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 // indirect - github.com/container-storage-interface/spec v1.5.0 // indirect - github.com/containerd/cgroups v1.0.3 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/cilium/ebpf v0.9.1 // indirect + github.com/container-storage-interface/spec v1.8.0 // indirect + github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/console v1.0.3 // indirect - github.com/containerd/containerd v1.6.9 // indirect - github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3 // indirect - github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/cyphar/filepath-securejoin v0.2.3 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect - github.com/dnaeon/go-vcr v1.2.0 // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/emicklei/go-restful v2.9.5+incompatible // indirect github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/zapr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.21.1 // indirect - github.com/go-ozzo/ozzo-validation v3.5.0+incompatible // indirect - github.com/goccy/go-json v0.9.7 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/zapr v1.2.4 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/goccy/go-json v0.10.0 // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect - github.com/gofrs/uuid v4.2.0+incompatible // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.2.0 // indirect + github.com/gofrs/uuid v4.4.0+incompatible // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/cadvisor v0.44.1 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/cadvisor v0.47.3 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect - github.com/googleapis/gax-go/v2 v2.5.1 // indirect - github.com/gophercloud/gophercloud v1.0.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + 
github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/heketi/heketi v10.3.0+incompatible // indirect github.com/hodgesds/perf-utils v0.7.0 github.com/imdario/mergo v0.3.12 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/karrick/godirwalk v1.17.0 // indirect + github.com/klauspost/cpuid/v2 v2.0.9 // indirect github.com/leodido/go-urn v1.2.1 // indirect github.com/libopenstorage/openstorage v1.0.0 // indirect github.com/lithammer/dedent v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/ipvs v1.0.1 // indirect + github.com/moby/ipvs v1.1.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect @@ -168,117 +180,127 @@ require ( github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect github.com/opencontainers/selinux v1.10.1 // indirect - github.com/pelletier/go-toml/v2 v2.0.1 // indirect + github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.3.0 - github.com/prometheus/common v0.38.0 // indirect + github.com/prometheus/client_model v0.4.0 + github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/quobyte/api v0.1.8 // indirect + github.com/prometheus/procfs v0.10.1 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 // indirect - github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/afero v1.6.0 // indirect - github.com/storageos/go-api v2.2.0+incompatible // indirect + github.com/seccomp/libseccomp-golang v0.10.0 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + github.com/spf13/afero v1.9.2 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect - github.com/ugorji/go/codec v1.2.7 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.9 // indirect github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 // indirect - github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect - github.com/vmware/govmomi v0.30.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.5 // 
indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect - go.etcd.io/etcd/client/v3 v3.5.5 // indirect - go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/contrib v0.20.0 // indirect + github.com/vishvananda/netns v0.0.4 // indirect + github.com/vmware/govmomi v0.30.6 // indirect + go.etcd.io/etcd/api/v3 v3.5.9 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect + go.etcd.io/etcd/client/v3 v3.5.9 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.2 // indirect go.opentelemetry.io/otel v1.10.0 // indirect - go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect - go.opentelemetry.io/otel/metric v0.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect + go.opentelemetry.io/otel/metric v0.32.2 // indirect go.opentelemetry.io/otel/sdk v1.10.0 // indirect - go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect - go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect go.opentelemetry.io/otel/trace v1.10.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect - go.uber.org/goleak v1.2.0 // indirect - go.uber.org/zap v1.19.1 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect - golang.org/x/sync v0.4.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/tools v0.14.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - google.golang.org/api v0.96.0 // indirect + go.uber.org/goleak v1.2.1 // indirect + go.uber.org/zap v1.25.0 // indirect + golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.16.1 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/api v0.114.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect gopkg.in/gcfg.v1 v1.2.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.24.2 // indirect - k8s.io/cloud-provider v0.24.15 // indirect - k8s.io/csi-translation-lib v0.24.15 // indirect - k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect - k8s.io/kube-proxy v0.0.0 // indirect + k8s.io/apiextensions-apiserver v0.28.3 // indirect + k8s.io/cloud-provider v0.28.7 // indirect + k8s.io/csi-translation-lib v0.28.7 // indirect + 
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/legacy-cloud-providers v0.0.0 // indirect - k8s.io/mount-utils v0.24.15 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + k8s.io/metrics v0.28.7 + k8s.io/mount-utils v0.28.7 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) replace ( + github.com/AliyunContainerService/terway-apis => gitlab.alibaba-inc.com/cos/terway-apis v0.1.1 + // github.com/containerd/ttrpc => github.com/containerd/ttrpc v1.1.0 github.com/go-logr/logr => github.com/go-logr/logr v1.2.0 - github.com/google/cadvisor => github.com/koordinator-sh/cadvisor v0.0.0-20230619152730-4b5c088201f6 github.com/gophercloud/gophercloud => github.com/gophercloud/gophercloud v0.1.0 + github.com/grpc-ecosystem/go-grpc-middleware => github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 + github.com/grpc-ecosystem/go-grpc-prometheus => github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 + github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/grpc-ecosystem/grpc-gateway/v2 => github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 + github.com/koordinator-sh/apis => github.com/koordinator-sh/apis v1.2.0 github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.39.2 - go.opentelemetry.io/contrib => go.opentelemetry.io/contrib v0.20.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp => go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 - go.opentelemetry.io/otel => go.opentelemetry.io/otel v0.20.0 - go.opentelemetry.io/otel/metric => go.opentelemetry.io/otel/metric v0.20.0 - go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v0.20.0 - go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v0.20.0 - go.opentelemetry.io/proto/otlp => go.opentelemetry.io/proto/otlp v0.7.0 + gitlab.alibaba-inc.com/cache/api => gitlab.alibaba-inc.com/cache/api v0.1.5-0.20240308092105-65e5b96f60ab + gitlab.alibaba-inc.com/cos/unified-resource-api => gitlab.alibaba-inc.com/cos/unified-resource-api v1.22.15-8.0.20240308085936-5f879bf29db5 + gitlab.alibaba-inc.com/serverlessinfra/dummy-workload => gitlab.alibaba-inc.com/serverlessinfra/dummy-workload v1.0.2-0.20240308091048-eae05b39e593 + gitlab.alibaba-inc.com/unischeduler/api => gitlab.alibaba-inc.com/unischeduler/api v0.0.3-0.20240308091437-58bf0e6c4d47 + gitlab.alibaba-inc.com/virtcontainers/agent-protocols => gitlab.alibaba-inc.com/koordinator-sh/virtcontainers-protocols v0.0.0-20240313131222-81245f6955f2 // replace for exclude github.com/gogo/protobuf golang.org/x/crypto => golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 golang.org/x/time => golang.org/x/time v0.3.0 - k8s.io/api => k8s.io/api v0.24.15 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.15 - k8s.io/apimachinery => k8s.io/apimachinery v0.24.15 - k8s.io/apiserver => k8s.io/apiserver v0.24.15 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.15 - k8s.io/client-go => k8s.io/client-go v0.24.15 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.15 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.15 - 
k8s.io/code-generator => k8s.io/code-generator v0.24.15 - k8s.io/component-base => k8s.io/component-base v0.24.15 - k8s.io/component-helpers => k8s.io/component-helpers v0.24.15 - k8s.io/controller-manager => k8s.io/controller-manager v0.24.15 - k8s.io/cri-api => k8s.io/cri-api v0.24.15 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.15 + google.golang.org/api => google.golang.org/api v0.114.0 + google.golang.org/genproto => google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 + google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 + google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 + google.golang.org/grpc => google.golang.org/grpc v1.56.3 + google.golang.org/protobuf => google.golang.org/protobuf v1.31.0 + k8s.io/api => k8s.io/api v0.28.7 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.7 + k8s.io/apimachinery => k8s.io/apimachinery v0.28.7 + k8s.io/apiserver => k8s.io/apiserver v0.28.7 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.7 + k8s.io/client-go => k8s.io/client-go v0.28.7 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.7 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.7 + k8s.io/code-generator => k8s.io/code-generator v0.28.7 + k8s.io/component-base => k8s.io/component-base v0.28.7 + k8s.io/component-helpers => k8s.io/component-helpers v0.28.7 + k8s.io/controller-manager => k8s.io/controller-manager v0.28.7 + k8s.io/cri-api => k8s.io/cri-api v0.28.7 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.7 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.7 + k8s.io/endpointslice => k8s.io/endpointslice v0.28.7 k8s.io/gengo => k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 - k8s.io/klog/v2 => k8s.io/klog/v2 v2.60.1 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.15 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.15 - k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.15 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.15 - k8s.io/kubectl => k8s.io/kubectl v0.24.15 - k8s.io/kubelet => k8s.io/kubelet v0.24.15 - k8s.io/kubernetes => k8s.io/kubernetes v1.24.15 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.15 - k8s.io/metrics => k8s.io/metrics v0.24.15 - k8s.io/mount-utils => k8s.io/mount-utils v0.24.15 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.15 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.15 + k8s.io/klog/v2 => k8s.io/klog/v2 v2.100.1 + k8s.io/kms => k8s.io/kms v0.28.7 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.7 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.7 + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.7 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.7 + k8s.io/kubectl => k8s.io/kubectl v0.28.7 + k8s.io/kubelet => k8s.io/kubelet v0.28.7 + k8s.io/kubernetes => k8s.io/kubernetes v1.28.7 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.7 + k8s.io/metrics => k8s.io/metrics v0.28.7 + k8s.io/mount-utils => k8s.io/mount-utils v0.28.7 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.7 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.7 + sigs.k8s.io/controller-runtime => 
sigs.k8s.io/controller-runtime v0.16.5 sigs.k8s.io/descheduler => sigs.k8s.io/descheduler v0.26.1-0.20230402001301-90905d2c2194 - sigs.k8s.io/scheduler-plugins => sigs.k8s.io/scheduler-plugins v0.22.6 ) diff --git a/go.sum b/go.sum index 4a5a0ad72..4d7e82d6f 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,9 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +bitbucket.org/bertimus9/systemstat v0.5.0/go.mod h1:EkUWPp8lKFPMXP8vnbpT5JDI0W/sTiLZAvN8ONWErHY= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -15,74 +12,206 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod 
h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod 
h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v1.0.0/go.mod 
h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= 
+cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= 
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= -dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= -dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= 
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= -github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= @@ -90,21 +219,20 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9A github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= github.com/Azure/go-autorest/autorest/validation v0.3.1 
h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3 h1:FCalqNmQYSMCCHoCtAxZN/ZgLc8ufgeo5Z3wrIoJZvs= -github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw= +github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b h1:Heo1J/ttaQFgGJSVnCZquy3e5eH5j1nqxBuomztB3P0= +github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b/go.mod h1:FNj4KYEAAHfYu68kRYolGoxkaJn+6mdEsaM12VTwuI0= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= @@ -114,8 +242,8 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -124,7 +252,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod 
h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0= +github.com/Microsoft/hcsshim v0.8.25/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I= github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= @@ -135,11 +263,14 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/alecthomas/kingpin/v2 v2.3.1/go.mod h1:oYL5vtsvEHZGHxU7DMp32Dvx+qL+ptGn6lWaot2vCNE= +github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -148,9 +279,12 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df 
h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -162,17 +296,13 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/auth0/go-jwt-middleware v1.0.1 h1:/fsQ4vRr4zod1wKReUH+0A3ySRjGiT9G34kypO/EKwI= -github.com/auth0/go-jwt-middleware v1.0.1/go.mod h1:YSeUX3z6+TF2H+7padiEqNJ73Zy9vXW72U//IgN0BIM= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.102 h1:6tUCTGL2UDbFZae1TLGk8vTgeXuzkb8KbAe2FiAeKHc= github.com/aws/aws-sdk-go v1.44.102/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -182,37 +312,43 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= 
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.8.0 h1:ea0Xadu+sHlu7x5O3gKhRpQ1IKiMrSiHttPF0ybECuA= +github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod 
h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -221,26 +357,18 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313 h1:eIHD9GNM3Hp7kcRW5mvcz7WTR3ETeoYYKwpgA04kaXE= -github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= +github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc h1:PYXxkRUBGUMa5xgMVMDl62vEklZvKpVaxQeN9ie7Hfk= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags 
v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo= -github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI= +github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= @@ -255,8 +383,8 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= -github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -273,7 +401,6 @@ github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go. 
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.12/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= @@ -318,8 +445,8 @@ github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0x github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= -github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3 h1:BhCp66ofL8oYcdelc3CBXc2/Pfvvgx+s+mrp9TvNgn8= -github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3/go.mod h1:YYyNVhZrTMiaf51Vj6WhAJqJw+vl/nzABhj8pWrzle4= +github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs= +github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -339,24 +466,26 @@ github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= -github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE= +github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= +github.com/coredns/corefile-migration v1.0.20/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod 
h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -365,9 +494,11 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= @@ -375,7 +506,7 @@ github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjI github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= +github.com/daviddengcn/go-colortext v1.0.0/go.mod h1:zDqEI5NVUop5QPpVJUxE9UO10hRnmkD5G4Pmri9+m4c= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= @@ -390,11 +521,10 
@@ github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= @@ -411,63 +541,60 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNE github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.8 h1:B2cR/FAaiMtYDHv5BQpaqtkjGuWQIgr2KQZtHQ7f6i8= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod 
h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= -github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8= +github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= 
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -475,8 +602,11 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -484,36 +614,36 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-ozzo/ozzo-validation v3.5.0+incompatible h1:sUy/in/P6askYr16XJgTKq/0SZhiWsdg4WZGaLsGQkM= -github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= 
-github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= -github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= -github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= -github.com/go-playground/validator/v10 v10.10.0 h1:I7mrTYv78z8k8VXa/qJlOlEXn/nBh+BF8dHX5nt/dr0= -github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= +github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= +github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.11.2 h1:q3SHpufmypg+erIExEKUmsgmhDTyhcJ38oeKGACXohU= +github.com/go-playground/validator/v10 v10.11.2/go.mod h1:NieE624vt4SCTJtD87arVLvdmjPAeV8BQlHtMnw9D7s= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= -github.com/goccy/go-json v0.9.7 h1:IcB+Aqpx/iMHu5Yooh7jEzJk1JZ7Pjtmys2ukPr7EeM= -github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.0 h1:mXKd9Qw4NuzShiRlOXKews24ufknHO7gx30lsDyokKA= +github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus 
v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= @@ -521,11 +651,11 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= -github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -533,20 +663,20 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod 
h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -561,10 +691,6 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= @@ -577,21 +703,27 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= +github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= +github.com/golangplus/bytes v1.0.0/go.mod h1:AdRaCFwmc/00ZzELMWb01soso6W1R/++O1XL80yAn+A= +github.com/golangplus/fmt v1.0.0/go.mod h1:zpM0OfbMCjPtd2qkTD/jX2MgiFCqklhSUFyDW44gVQE= +github.com/golangplus/testing v1.0.0/go.mod h1:ZDreixUV3YzhoVraIDyOzHrr76p6NUh6k/pPg/Q3gYA= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.10.4/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= -github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cadvisor v0.47.3 h1:5XKTHBduWlBjmgw07uwEiC+Xa/FRd0MZI37oqlTagO0= +github.com/google/cadvisor v0.47.3/go.mod h1:iJdTjcjyKHjLCf7OSTzwP5GxdfrkPusw2x5bwGvuLUw= +github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo= +github.com/google/cel-go v0.16.1/go.mod 
h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -603,8 +735,6 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -614,20 +744,17 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20220829040838-70bd9ae97f40 h1:ykKxL12NZd3JmWZnyqarJGsF73M9Xhtrik/FEtEeFRE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -635,46 +762,42 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1 h1:kBRZU0PSuI7PspsSb/ChWoVResUcwNVIdpB049pKTiw= github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod 
h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.15.2 h1:3Q/pDqvJ7udgt/60QOOW/p/PeKioQN+ncYzzCdN2av0= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= @@ -707,31 +830,27 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p 
github.com/hashicorp/nomad/api v0.0.0-20220921012004-ddeeb1040edf h1:l/EZ57iRPNs8vd8c9qH0dB4Q+IiZHJouLAgxJ5j25tU= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= -github.com/heketi/heketi v10.3.0+incompatible h1:X4DBFPzcyWZWhia32d94UhDECQJHH0M5kpRb1gxxUHk= -github.com/heketi/heketi v10.3.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= -github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6 h1:oJ/NLadJn5HoxvonA6VxG31lg0d6XOURNA09BTtM4fY= -github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqsr3rHE1Ow= github.com/hodgesds/perf-utils v0.7.0 h1:7KlHGMuig4FRH5fNw68PV6xLmgTe7jKs9hgAcEAbioU= github.com/hodgesds/perf-utils v0.7.0/go.mod h1:LAklqfDadNKpkxoAJNHpD5tkY0rkZEVdnCEWN5k4QJY= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ionos-cloud/sdk-go/v6 v6.1.3 h1:vb6yqdpiqaytvreM0bsn2pXw+1YDvEk2RKSmBAQvgDQ= github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jedib0t/go-pretty/v6 v6.4.0 h1:YlI/2zYDrweA4MThiYMKtGRfT+2qZOO65ulej8GTcVI= github.com/jedib0t/go-pretty/v6 v6.4.0/go.mod h1:MgmISkTWDSFu0xOqiZ0mKNntMQ2mDgOcwOkwBEkMDJI= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -758,34 +877,36 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod 
h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.0.12/go.mod h1:AkACMQGiTgCt0lQw3m7TTU8PLH9lYKNK5e9DqFf5VuM= github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.1 h1:BI3L7hNqRvXtB42FO4NI/0ZjDDVRPOMBDFLShhFtf28= github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.1/go.mod h1:AkACMQGiTgCt0lQw3m7TTU8PLH9lYKNK5e9DqFf5VuM= -github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/kolo/xmlrpc v0.0.0-20220919000247-3377102c83bd h1:b1taQnM42dp3NdiiQwfmM1WyyucHayZSKN5R0PRYWL0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koordinator-sh/cadvisor v0.0.0-20230619152730-4b5c088201f6 h1:JWTehyQhdu6YuvMy48RE8yTiHG5Jbvh69z6WqMZgTLY= -github.com/koordinator-sh/cadvisor v0.0.0-20230619152730-4b5c088201f6/go.mod h1:v7jZ8zyrlnqg3GvliibfHJJFLdgiqKQBcwIRVelkoP4= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -798,40 +919,36 @@ github.com/linode/linodego v1.9.1 h1:29UpEPpYcGFnbwiJW8mbk/bjBZpgd/pv68io2IKTo34 github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/lpabon/godbc v0.1.1 h1:ilqjArN1UOENJJdM34I2YHKmF/B0gGq4VLoSGy9iAao= -github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= -github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= 
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -839,28 +956,25 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod 
h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/moby/ipvs v1.0.1 h1:aoZ7fhLTXgDbzVrAnvV+XbKOU8kOET7B3+xULDF/1o0= -github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ= +github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -881,7 +995,6 @@ github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2 github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -890,37 +1003,56 @@ github.com/mwitkow/grpc-proxy v0.0.0-20230212185441-f345521cb9c9/go.mod h1:MvMXo github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= -github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= -github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= 
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.5.0 h1:TRtrvv2vdQqzkwrQ1ke6vtXf7IK34RBUJafIy1wMwls= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= +github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc= +github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk= +github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= +github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/onsi/gomega v1.17.0/go.mod 
h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= +github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw= +github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= +github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -938,9 +1070,9 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= -github.com/opencontainers/runc v1.1.6 h1:XbhB8IfG/EsnhNvZtNdLB0GBw92GYEFvKlhaJk9jUgA= -github.com/opencontainers/runc v1.1.6/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= +github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk= +github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -956,20 +1088,21 @@ github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xA github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/openkruise/kruise-api v1.3.0 h1:yfEy64uXgSuX/5RwePLbwUK/uX8RRM8fHJkccel5ZIQ= -github.com/openkruise/kruise-api v1.3.0/go.mod h1:9ZX+ycdHKNzcA5ezAf35xOa2Mwfa2BYagWr0lKgi5dU= +github.com/openkruise/kruise-api v1.5.0 h1:Pbg5sr0AiJ+Tb8MBoPKrQLD4jOeI66zGhC4q2UG5i4Y= +github.com/openkruise/kruise-api v1.5.0/go.mod 
h1:C+ZilhxrMUiX6EYDIOsNowyu3i6pTHL6s0B9dxf2zAk= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/paypal/load-watcher v0.2.1/go.mod h1:MMCDf8aXF5k+K2q6AtMOBZItCvZ3oFAk+zNO4OAtp0w= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= +github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -978,14 +1111,14 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod 
h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -994,17 +1127,19 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1013,12 +1148,13 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.38.0 h1:VTQitp6mXTdUoCmDMugDVOJ1opi6ADftKfp/yeqTR/E= -github.com/prometheus/common v0.38.0/go.mod h1:MBXfmBQZrK5XpbCkjofnXs96LD2QQ7fEq4C0xjC/yec= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/common/sigv4 v0.1.0 
h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -1029,29 +1165,30 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/prometheus/prometheus v0.39.2 h1:bGHQecMXUvHHr4PWf02yOG/fh6XQaCVzKYlY+WsQmyE= github.com/prometheus/prometheus v0.39.2/go.mod h1:GjQjgLhHMc0oo4Ko7qt/yBSJMY4hUoiAZwsYQgjaePA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/quobyte/api v0.1.8 h1:+sOX1gIlC/OaLipqVZWrHgly9Kh9Qo8OygeS0mWAg30= -github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= github.com/rasky/go-xdr v0.0.0-20170217172119-4930550ba2e2/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 
h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= @@ -1059,34 +1196,11 @@ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZX github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY= +github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= -github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= -github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= -github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= -github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= -github.com/shurcooL/httperror 
v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= -github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= -github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= -github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= -github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1094,49 +1208,43 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/storageos/go-api v2.2.0+incompatible h1:U0SablXoZIg06gvSlg8BCdzq1C/SkHVygOVX95Z2MU0= -github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -1150,29 
+1258,28 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= -github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= -github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/ugorji/go/codec v1.2.9 h1:rmenucSohSTiyL09Y+l2OCk+FrMxGMzho2+tjr5ticU= +github.com/ugorji/go/codec v1.2.9/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= -github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= 
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= @@ -1181,10 +1288,11 @@ github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:tw github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vmware/govmomi v0.30.0 h1:Fm8ugPnnlMSTSceDKY9goGvjmqc6eQLPUSUeNXdpeXA= -github.com/vmware/govmomi v0.30.0/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= +github.com/vishvananda/netns v0.0.2/go.mod h1:yitZXdAVI+yPFSb4QUe+VW3vOVl4PZPNcBgbPxAtJxw= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vmware/govmomi v0.30.6 h1:O3tjSwQBy0XwI5uK1/yVIfQ1LP9bAECEDUfifnyGs9U= +github.com/vmware/govmomi v0.30.6/go.mod h1:epgoslm97rLECMV4D+08ORzUBEU7boFSepKjt7AYVGg= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -1192,124 +1300,147 @@ github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0= -go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8= -go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= -go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= -go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI= -go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= -go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0 h1:jk8D/lwGEDlQU9kZXUFMSANkE22Sg5+mW27ip8xcF9E= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= +go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= +go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= +go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= +go.etcd.io/etcd/client/v2 v2.305.9 
h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo= +go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ= +go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= +go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= +go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= +go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY= +go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= +go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= +go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= +go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= +go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 h1:Q3C9yzW6I9jqEc8sawxzxZmY48fs9u220KXq6d5s3XU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= 
-go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0 h1:KQjX0qQ8H21oBUAvFp4ZLKJMMLIluONvSPDAFIGmX58= +go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.35.0/go.mod h1:DQYkU9srMFqLUTVA/7/WlRHdnYDB7wyMMlle2ktMjfI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.2 h1:0xM1ukX7TeKmHZSPrE6ISujp4wUmXtqHJUVuNKtNtoY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.2/go.mod h1:0jErEUghjmDAC8iXe3lm3XJoJ1jblePjblQneGQAnrI= +go.opentelemetry.io/contrib/propagators/b3 v1.10.0 h1:6AD2VV8edRdEYNaD8cNckpzgdMLU2kbV9OYyxt2kvCg= +go.opentelemetry.io/contrib/propagators/b3 v1.10.0/go.mod h1:oxvamQ/mTDFQVugml/uFS59+aEUnFLhmd1wsG+n5MOE= +go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= +go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= +go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 h1:pDDYmo0QadUPal5fwXoY1pmMpFcdyhXOmL5drCrI3vU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 h1:KtiUEhQmj/Pa874bVYKGNVdq8NPKiacPbaRRtgXi+t4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +go.opentelemetry.io/otel/metric v0.32.2 h1:q4il3sGUfyfGJIJgjYwEnwWoI4XAHitisQ/Z2y9N3PA= +go.opentelemetry.io/otel/metric v0.32.2/go.mod 
h1:iLPP7FaKMAD5BIxJ2VX7f2KTuz//0QK2hEUyti5psqQ= +go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +go.opentelemetry.io/otel/sdk v1.10.0 h1:jZ6K7sVn04kk/3DNUdJ4mqRlGDiXAVuIG+MMENpTNdY= +go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= +go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= +go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= +go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= +go.uber.org/zap v1.25.0/go.mod 
h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670 h1:18EFjUmQOcUvxNYSkA6jO9VAiXCnxFY6NyDX0bHDmkU= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20210220032938-85be41e4509f/go.mod h1:I6l2HNBLBZEcrOoCpyKLdY2lHoRZ8lI4x60KMCQDft4= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1317,42 +1448,39 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod 
v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1360,7 +1488,6 @@ golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1379,61 +1506,62 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod 
h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
+golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1447,21 +1575,20 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1491,6 +1618,7 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1504,15 +1632,12 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1522,92 +1647,95 @@ golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1616,6 +1744,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1626,7 +1755,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1644,27 +1772,27 @@ golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= 
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1672,226 +1800,37 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= 
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.96.0 h1:F60cuQPJq7K7FzsxMYHAUJSiXh2oKctHxBMbDygxhfM= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine 
v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod 
h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210401141331-865547bb08e2/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006 h1:mmbq5q8M1t7dhkLw320YK4PsOXm6jdnUAkErImaIqOg= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= 
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1903,28 +1842,23 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.1/go.mod 
h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1944,107 +1878,131 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.24.15 h1:5Zz9fnII9fR2Pouj8YMnsB2BEg9fzXRHo2Ed9Js62GU= -k8s.io/api v0.24.15/go.mod h1:lPoMB2MmzSUa3R8iGI4GapLCYnOcbFT2SHPBR4mPt6E= -k8s.io/apiextensions-apiserver v0.24.15 h1:pucsd2ktWmDNSnHNnkerbrlRM2yWffjW4xP7SCvemyQ= -k8s.io/apiextensions-apiserver v0.24.15/go.mod h1:zyMke4waTm2IX79y6qOehGZNYoaVtVtdyCkfa/nIzp8= -k8s.io/apimachinery v0.24.15 h1:yyPEqYqYNAZL+ybqyCo4pzfxQLYezRJITFiaTmEFr7M= -k8s.io/apimachinery v0.24.15/go.mod h1:kSzhCwldu9XB172NDdLffRN0sJ3x95RR7Bmyc4SHhs0= -k8s.io/apiserver v0.24.15 h1:ejo5nBFYXj4e9HHoa2CnkjL5DHcrkOw5reZzTEfCxGk= -k8s.io/apiserver v0.24.15/go.mod h1:Em6A8ake05gNti4mO06/3xNCK+WUYMXQ5Abvh5661Po= -k8s.io/cli-runtime v0.24.15/go.mod h1:eDJ6WBuppnPCoobl1N0nhtrsMrzFp5qh2eNLgYQmFtM= -k8s.io/client-go v0.24.15 h1:0gPvSAykRxvl8vD7kPA8JvAWQJuwrSEZkfgSNb1T7q8= -k8s.io/client-go v0.24.15/go.mod h1:200/+zvHmX6fL97NBMa/ndRbJzc3Kwh5um0U2KgCiTA= -k8s.io/cloud-provider v0.24.15 h1:df1oGEueADR2H4T4+vzsihzNufvkTIKJb8xBig5NqT0= -k8s.io/cloud-provider v0.24.15/go.mod h1:qybXQRPoszAxCA7P5Q8mpUKKs8FekeAXS9DdsGIdoPE= -k8s.io/cluster-bootstrap v0.24.15/go.mod h1:NJiCLBvofF7AvwnLHs6AL7DXNQ2ReRRfJI9K4+oMe88= -k8s.io/code-generator v0.24.15 h1:I8iyUiIXFKDhVoxMRgdpqSk4NfDQrA1Aiqumr41X2vE= -k8s.io/code-generator v0.24.15/go.mod h1:nQvp6VgOfRkKiLyMz+/JTNXNS6Q4bGWOVtB5rKd2TV0= -k8s.io/component-base v0.24.15 h1:HcZCS3OLuKPYa41BfEVr1g04bparNoGm9ClSJEV3yHc= -k8s.io/component-base v0.24.15/go.mod h1:ENK7pDtlgREVx2mtiLxVRFO9Liqq3ftVLi22gDp69xk= -k8s.io/component-helpers v0.24.15 h1:Zjsqs3uGRjIGvZLES/Kz+zk2qJPY+fZUZp1EUCb/JlM= -k8s.io/component-helpers v0.24.15/go.mod h1:hdd7lSzD8iOQTewRQz0YzEEW7uDkYzF1eEGXPNZawIo= 
-k8s.io/controller-manager v0.24.15/go.mod h1:hiYfD3OTtGeRPkuzvwY21iifftE76oLN7iTuGBmUXCo= -k8s.io/cri-api v0.24.15 h1:xNVst8AZG7BejcmwWXoVyJRsRVwOwYZNokhpnWqN3bw= -k8s.io/cri-api v0.24.15/go.mod h1:/KjYFI3BwY8/ytCQa0+Hla8FyGpDps88N45g4jet5I4= -k8s.io/csi-translation-lib v0.24.15 h1:M9XaLwSqK/YqSopjlCq42DOQRquORJXtjzeYqupWDDo= -k8s.io/csi-translation-lib v0.24.15/go.mod h1:+9VQt/SYAAscI+faj/bSniKyr9frhceI2MD/3/ESuAg= +k8s.io/api v0.28.7 h1:YKIhBxjXKaxuxWJnwohV0aGjRA5l4IU0Eywf/q19AVI= +k8s.io/api v0.28.7/go.mod h1:y4RbcjCCMff1930SG/TcP3AUKNfaJUgIeUp58e/2vyY= +k8s.io/apiextensions-apiserver v0.28.7 h1:NQlzP/vmvIO9Qt7wQTdMe9sGWGkozQZMPk9suehAvR8= +k8s.io/apiextensions-apiserver v0.28.7/go.mod h1:ST+ZOppyy+Z0mIxezSOK8qwIXctNwdFLNpGkQp8bw4M= +k8s.io/apimachinery v0.28.7 h1:2Z38/XRAOcpb+PonxmBEmjG7hBfmmr41xnr0XvpTnB4= +k8s.io/apimachinery v0.28.7/go.mod h1:QFNX/kCl/EMT2WTSz8k4WLCv2XnkOLMaL8GAVRMdpsA= +k8s.io/apiserver v0.28.7 h1:J8sQsOi+eA97+LGKve7ysYwfjFBf1sjPP/IFrrv6UdU= +k8s.io/apiserver v0.28.7/go.mod h1:Ui6QxEMRsE4ah7NGiocso7Zep70R8zV9r4OUNGso7/k= +k8s.io/cli-runtime v0.28.7/go.mod h1:Y11JDqc/y2zwGjTuhKzqLAPyRt+QDjWbqaXPtNfrGdY= +k8s.io/client-go v0.28.7 h1:3L6402+tjmOl8twX3fjUQ/wsYAkw6UlVNDVP+rF6YGA= +k8s.io/client-go v0.28.7/go.mod h1:xIoEaDewZ+EwWOo1/F1t0IOKMPe1rwBZhLu9Es6y0tE= +k8s.io/cloud-provider v0.28.7 h1:/UmsdF2QU+U7xohJw+kYVsq2uEsMIHuaLe9SEXig/I8= +k8s.io/cloud-provider v0.28.7/go.mod h1:eIsSt8QiSpJRcJXRbfi60w+kJ6d/sk2U8EQ0K/YN24o= +k8s.io/cluster-bootstrap v0.28.7/go.mod h1:KPz5rbijbaDlFzJLyERnjVJgsIBsPrNft5X6kRvpeM0= +k8s.io/code-generator v0.28.7 h1:s+Md/llHFPRtKA+m+NGnrdx5QJ3iVZQ3gx1B7l6R7xE= +k8s.io/code-generator v0.28.7/go.mod h1:IaYGMqYjgj0zE3L9mnHo7hIL9GkY08GvGyyracaIxTA= +k8s.io/component-base v0.28.7 h1:Cq5aQ52N0CTaOMiary4rXzR4RoTP77Z3ll4qSg4qH7s= +k8s.io/component-base v0.28.7/go.mod h1:RrtNBKrSuckksSQ3fV9PhwBSHO/ZbwJXM2Z0OPx+UJk= +k8s.io/component-helpers v0.28.7 h1:YPdfkbl5GEuWVv6r2kNPRbqFWd7Rduk0A+tqnQpKPWA= +k8s.io/component-helpers v0.28.7/go.mod h1:kMAWTPtouyxk+jIAx6eLGRkaSRN87ompmLyLkUhvpKg= +k8s.io/controller-manager v0.28.7 h1:ydHuFsyP+ani/AZioMltwnd0WEddvoiha64sp4gOGV8= +k8s.io/controller-manager v0.28.7/go.mod h1:EGtoGg/vN0Crr+xi615nVGlginAuVHbGfC55po+dfQo= +k8s.io/cri-api v0.28.7 h1:n5cy5cn74Rr+FL/CnmaO1Y+JMghQ+h+uV6jC0mhz84Q= +k8s.io/cri-api v0.28.7/go.mod h1:zWVXihHsxPjRh4EBQHirnniYdqE4SmnLzv2nn56iL+Q= +k8s.io/csi-translation-lib v0.28.7 h1:4T5LlsTvPaAGvsMPDv9lMt1Kfa7erPXQ/Lb6rsv+iKM= +k8s.io/csi-translation-lib v0.28.7/go.mod h1:5cpc9VBpoRLyVIk6gdv9/eyuW5XMta/xq6N0Ix8UG+I= +k8s.io/dynamic-resource-allocation v0.28.7 h1:GyXhPtBKakLFQwKslFYrYsGc7I+E90cdwH6xNS3RmDM= +k8s.io/dynamic-resource-allocation v0.28.7/go.mod h1:Yn8RoOyKv1ImQ4Wa7F2IXIdm6SWOOA/PzPak4V0GJSo= +k8s.io/endpointslice v0.28.7/go.mod h1:QBMzKxQkgH8UXOOVRldIKWE+BinurxfIOsO8Gpmbgfk= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/hack/tools v0.0.0-20210917071902-331d2323a192/go.mod h1:DXW3Mv8xqJvjXWiBSBHrK2O4mq5LMD0clqkv3b1g9HA= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-aggregator v0.24.15/go.mod h1:nXL5l4/YvRHuA5u8+S43hR/2waj5MPzoN5hiAGucGJ4= -k8s.io/kube-controller-manager v0.24.15/go.mod h1:ueERlQER5/eh8nFWaW85LLUcdmGpMfWZMHMaTCIZ/vU= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 
h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/kube-proxy v0.24.15 h1:u/8SOdJL3+a5LJyoRw31hEJXPCWx+6tPoV5GCggNnfY= -k8s.io/kube-proxy v0.24.15/go.mod h1:K6NqM4uC17E8TALRonDP/JoL7dfCQOX7NLS6lAKJ7qE= -k8s.io/kube-scheduler v0.24.15 h1:nDp4hkYQC28VS2eBHhTWQD1pzxVEH+yhZSrEwfZ+Ef0= -k8s.io/kube-scheduler v0.24.15/go.mod h1:q1o+4b4aA/Db6vK6wKSD2lPTjKNJ8hOEAEMuUpk81KM= -k8s.io/kubectl v0.24.15 h1:KyG/fw+nRAUZLySVCLCEXrhdUfN3Q+rviZa3LRfiqkE= -k8s.io/kubectl v0.24.15/go.mod h1:9yGQvq7RyIj11iVOTYaIsrSNXZrEfeX77EgykJ+gSUk= -k8s.io/kubelet v0.24.15 h1:zcWFOjObF+lgQcl/e+uLCUnMQqWt5/5oGQJcTZt0m1U= -k8s.io/kubelet v0.24.15/go.mod h1:ST1Ry8+xA0FajG3gPBXKiEK7JPCPqRJnefiua8NKjDc= -k8s.io/kubernetes v1.24.15 h1:FupxlSyYgbz22yjGVZ7dxH+azhuO0OU8MnACdydrBzQ= -k8s.io/kubernetes v1.24.15/go.mod h1:MlcoxAWSYrfeOwlfRNne7zYyZsHmlT3dlw7v3xzDnDM= -k8s.io/legacy-cloud-providers v0.24.15 h1:0yLKue7fbat+rXdZoJRVP+iQ/y0Oxy0tHIwU0WKIAQg= -k8s.io/legacy-cloud-providers v0.24.15/go.mod h1:sj/vZmVN9070GMNU9cuqSTupFI7ErHjT+bMXSc0rvac= -k8s.io/metrics v0.24.15 h1:DdQZ6/7/yxkg856MlNlyS3bYWeRoi764kElIQuZb5bs= -k8s.io/metrics v0.24.15/go.mod h1:0ZqaLxkIiopW4h1QfW5qaUZeajmLVxrwJJvWEljRYSM= -k8s.io/mount-utils v0.24.15 h1:q3sm4Gcp00iWXUInIEi5x8CqAmy2chmUTedIZdUxRkg= -k8s.io/mount-utils v0.24.15/go.mod h1:Xjtb0dquC5PG63kOD8shViqRczdkdQqW5Pc/rlmbsiU= -k8s.io/pod-security-admission v0.24.15/go.mod h1:P6OG7cc3DzmqN5Ydu7wx7+mJmNEZAulAgc5yATPY71U= -k8s.io/sample-apiserver v0.24.15/go.mod h1:ztKPKvVN/V1mHBZxe3rijzkggQ05nAJcXhtg2sDVv9c= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kms v0.28.7 h1:cvzpVHD+me7lRPqRJSeZvbKK8e9wob+iBx4mpg+9wa4= +k8s.io/kms v0.28.7/go.mod h1:t+mtSTTV8FiJ+87zSQ7FPn/F8TNzBP1/zTYFH69pX8c= +k8s.io/kube-aggregator v0.28.7/go.mod h1:elikzGl/2Y2UCTuAg6tL/k5yxWE4Zefyfw1YJ3S6W80= +k8s.io/kube-controller-manager v0.28.7/go.mod h1:qL0Z+9vp5UxyFZ8AFwOjsv72xCe+fc2C3XUwoi7Afpg= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/kube-proxy v0.28.7/go.mod h1:yI4RkKhthS24EV6Oar2kgv68sVj6TNqDGdHmDciD0nk= +k8s.io/kube-scheduler v0.28.7 h1:bNQU6RtO7kzZm4gzd2vyhXJIkZ8sujRi+n2D00fqtyI= +k8s.io/kube-scheduler v0.28.7/go.mod h1:rBiZJ6Fz+xpodbJGv/SMlf+Thvmc0SB/wGR9OfVUIM8= +k8s.io/kubectl v0.28.7 h1:XUV9lP7K2azjBjrGo9UPIRPFj6ofXF8I2lK7wSQehBg= +k8s.io/kubectl v0.28.7/go.mod h1:LFUYAHkMtiijT8ZpOqEcFrVhNUAt0W0KluxvLix1+vg= +k8s.io/kubelet v0.28.7 h1:EruF3Ajdta6fza7OK4fTpS3kV6b4XOKWIkvosMFQJVg= +k8s.io/kubelet v0.28.7/go.mod h1:p9/lHBv+aO9os2D3ygCs4x9jEC2GWAH9OdeC5misTuU= +k8s.io/kubernetes v1.28.7 h1:XV7AiCqtuOLaERR7uWcJnVpGG090lzJ9A37ftQuJhN4= +k8s.io/kubernetes v1.28.7/go.mod h1:0qpyGJTR3blkbQOmZA3Z0u1VDZJNxJM8ifLUVNJN0X8= +k8s.io/legacy-cloud-providers v0.28.7 h1:eEwL1towRRMERndSHcrJbLHS6U5qDD8HE99P/TgTHdM= +k8s.io/legacy-cloud-providers v0.28.7/go.mod h1:V6x8CwagaeGGe9rBvRwhSkuSJ1oGFXWwndLyAZbJFuw= +k8s.io/metrics v0.28.7 h1:DcxklECVz2NYVWmk6iK9dAmCHqCM0QxPAQtMA66a+kU= +k8s.io/metrics v0.28.7/go.mod h1:YE9kCsZPhx2i93xK2BjWdpWGS1CYyv9NSMlPkob6hDc= +k8s.io/mount-utils v0.28.7 h1:E9cq0QEbUmi76SqMQYBrKLT/EzpfWSEhkshYOjf3usM= +k8s.io/mount-utils v0.28.7/go.mod h1:pAnxhVbtMafSzAmFBgIfCdbM52+ObB7bBIN4MpuWk2o= +k8s.io/pod-security-admission 
v0.28.7/go.mod h1:1JG3Lq2dSo04jBUAKqKZM/pQUEfV5vdC2y9DtBiWrqs= +k8s.io/sample-apiserver v0.28.7/go.mod h1:9O6gyWNljNJtLirRam6oZYatFU7yszM35E4eLHVrw6U= k8s.io/system-validators v1.8.0/go.mod h1:gP1Ky+R9wtrSiFbrpEPwWMeYz9yqyy1S/KOh0Vci7WI= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod 
h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37 h1:fAPTNEpzQMOLMGwOHNbUkR2xXTQwMJOZYNx+/mLlOh0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.37/go.mod h1:vfnxT4FXNT8eGvO+xi/DsyC/qHmdujqwrUa1WSspCsk= -sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= -sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= +sigs.k8s.io/controller-runtime v0.16.5 h1:yr1cEJbX08xsTW6XEIzT13KHHmIyX8Umvme2cULvFZw= +sigs.k8s.io/controller-runtime v0.16.5/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20231005234617-5771399a8ce5 h1:q6JvS/AwlDfe9sHMTo1KOMdI376+mlB21pV7Xhfy5uA= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20231005234617-5771399a8ce5/go.mod h1:B6HLcvOy2S1qq2eWOFm9xepiKPMIc8Z9OXSPsnUDaR4= sigs.k8s.io/descheduler v0.26.1-0.20230402001301-90905d2c2194 h1:xPrRjhoJr6pst13/3UHNwATYAsJROWcv+vvGRa+Fk1Q= sigs.k8s.io/descheduler v0.26.1-0.20230402001301-90905d2c2194/go.mod h1:/z7jjqyhgYDSd+LclGulcqX/JgfGIOTNB7ohFlGNQAQ= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= -sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg= -sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg= -sigs.k8s.io/scheduler-plugins v0.22.6 h1:OlaFsEpldL4Fr38R46Q2j0bX8FFhGkSvWfZPTB5Y++M= -sigs.k8s.io/scheduler-plugins v0.22.6/go.mod h1:PJpHreZ7729sZVzrB8KUGKXlWDCvXaZL9lzvAS0gNZ8= 
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.13.4/go.mod h1:Bkaavz5RKK6ZzP0zgPrB7QbpbBJKiHuD3BB0KujY7Ls= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= +sigs.k8s.io/kustomize/cmd/config v0.11.2/go.mod h1:PCpHxyu10daTnbMfn3xhH1vppn7L8jsS3qpRKXb7Lkc= +sigs.k8s.io/kustomize/kustomize/v5 v5.0.4-0.20230601165947-6ce0bf390ce3/go.mod h1:/d88dHCvoy7d0AKFT0yytezSGZKjsZBVs9YTkBHSGFk= +sigs.k8s.io/kustomize/kyaml v0.14.2/go.mod h1:AN1/IpawKilWD7V+YvQwRGUvuUOOWpjsHu6uHwonSF4= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/hack/fix_crd_plural.sh b/hack/fix_crd_plural.sh deleted file mode 100755 index 4266eea19..000000000 --- a/hack/fix_crd_plural.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright 2017 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# replace corrected plural/singular name of nodeslo crd. 
-sed -in-place -e 's/ name: nodesloes.slo.koordinator.sh/ name: nodeslos.slo.koordinator.sh/g' config/crd/bases/slo.koordinator.sh_nodesloes.yaml -sed -in-place -e 's/plural: nodesloes$/plural: nodeslos/g' config/crd/bases/slo.koordinator.sh_nodesloes.yaml - -rm -f config/crd/bases/slo.koordinator.sh_nodesloes.yamln-place -mv config/crd/bases/slo.koordinator.sh_nodesloes.yaml config/crd/bases/slo.koordinator.sh_nodeslos.yaml - diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 2b93e473e..f29a57c69 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -56,7 +56,7 @@ ${SCRIPT_ROOT}/hack/generate-internal-groups.sh \ github.com/koordinator-sh/koordinator/pkg/scheduler/apis/generated \ github.com/koordinator-sh/koordinator/pkg/scheduler/apis \ github.com/koordinator-sh/koordinator/pkg/scheduler/apis \ - "config:v1beta2" \ + "config:v1beta3" \ --output-base "${TEMP_DIR}" \ --go-header-file hack/boilerplate/boilerplate.go.txt diff --git a/hack/update-license-header.sh b/hack/update-license-header.sh index 4e5c6ac35..da977e038 100755 --- a/hack/update-license-header.sh +++ b/hack/update-license-header.sh @@ -17,7 +17,7 @@ PROJECT=$(cd $(dirname $0)/..; pwd) -LICENSEHEADERCHECKER_VERSION=v1.4.0 +LICENSEHEADERCHECKER_VERSION=v1.5.0 GOBIN=${PROJECT}/bin go install github.com/lluissm/license-header-checker/cmd/license-header-checker@${LICENSEHEADERCHECKER_VERSION} diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index c53d57782..6636bb261 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -41,8 +41,7 @@ type Interface interface { SloV1alpha1() slov1alpha1.SloV1alpha1Interface } -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. +// Clientset contains the clients for groups. type Clientset struct { *discovery.DiscoveryClient analysisV1alpha1 *analysisv1alpha1.AnalysisV1alpha1Client diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index 7e9537883..76f8e0fc8 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -45,14 +45,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. 
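The regenerated fake clients in the hunks that follow stop hard-coding schema.GroupVersionResource and schema.GroupVersionKind literals and instead derive both values from each API package's SchemeGroupVersion. The following minimal, self-contained Go sketch illustrates that derivation pattern; the slo.koordinator.sh group/version strings and the standalone SchemeGroupVersion variable are illustrative assumptions, not code from this change.
package main
import (
	"fmt"
	"k8s.io/apimachinery/pkg/runtime/schema"
)
// SchemeGroupVersion normally lives in the API package itself (for example
// apis/slo/v1alpha1); it is redeclared here only to keep the sketch self-contained.
var SchemeGroupVersion = schema.GroupVersion{Group: "slo.koordinator.sh", Version: "v1alpha1"}
func main() {
	// Deriving both identifiers from one GroupVersion value keeps the fake
	// clients in sync with the scheme instead of repeating string literals.
	nodeslosResource := SchemeGroupVersion.WithResource("nodeslos")
	nodeslosKind := SchemeGroupVersion.WithKind("NodeSLO")
	fmt.Println(nodeslosResource) // e.g. slo.koordinator.sh/v1alpha1, Resource=nodeslos
	fmt.Println(nodeslosKind)     // e.g. slo.koordinator.sh/v1alpha1, Kind=NodeSLO
}
With this pattern a rename of the group or version only has to happen in one place, which is presumably why the regenerated clients below adopt it.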
diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index 3600010d6..8a4b86768 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -45,14 +45,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/pkg/client/clientset/versioned/typed/analysis/v1alpha1/fake/fake_recommendation.go b/pkg/client/clientset/versioned/typed/analysis/v1alpha1/fake/fake_recommendation.go index dfc543373..8cb480f06 100644 --- a/pkg/client/clientset/versioned/typed/analysis/v1alpha1/fake/fake_recommendation.go +++ b/pkg/client/clientset/versioned/typed/analysis/v1alpha1/fake/fake_recommendation.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/koordinator-sh/koordinator/apis/analysis/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeRecommendations struct { ns string } -var recommendationsResource = schema.GroupVersionResource{Group: "config.koordinator.sh", Version: "v1alpha1", Resource: "recommendations"} +var recommendationsResource = v1alpha1.SchemeGroupVersion.WithResource("recommendations") -var recommendationsKind = schema.GroupVersionKind{Group: "config.koordinator.sh", Version: "v1alpha1", Kind: "Recommendation"} +var recommendationsKind = v1alpha1.SchemeGroupVersion.WithKind("Recommendation") // Get takes name of the recommendation, and returns the corresponding recommendation object, and an error if there is any. 
func (c *FakeRecommendations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Recommendation, err error) { diff --git a/pkg/client/clientset/versioned/typed/config/v1alpha1/fake/fake_clustercolocationprofile.go b/pkg/client/clientset/versioned/typed/config/v1alpha1/fake/fake_clustercolocationprofile.go index 9ea896212..f42826374 100644 --- a/pkg/client/clientset/versioned/typed/config/v1alpha1/fake/fake_clustercolocationprofile.go +++ b/pkg/client/clientset/versioned/typed/config/v1alpha1/fake/fake_clustercolocationprofile.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/koordinator-sh/koordinator/apis/config/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeClusterColocationProfiles struct { Fake *FakeConfigV1alpha1 } -var clustercolocationprofilesResource = schema.GroupVersionResource{Group: "config.koordinator.sh", Version: "v1alpha1", Resource: "clustercolocationprofiles"} +var clustercolocationprofilesResource = v1alpha1.SchemeGroupVersion.WithResource("clustercolocationprofiles") -var clustercolocationprofilesKind = schema.GroupVersionKind{Group: "config.koordinator.sh", Version: "v1alpha1", Kind: "ClusterColocationProfile"} +var clustercolocationprofilesKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterColocationProfile") // Get takes name of the clusterColocationProfile, and returns the corresponding clusterColocationProfile object, and an error if there is any. func (c *FakeClusterColocationProfiles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterColocationProfile, err error) { diff --git a/pkg/client/clientset/versioned/typed/quota/v1alpha1/fake/fake_elasticquotaprofile.go b/pkg/client/clientset/versioned/typed/quota/v1alpha1/fake/fake_elasticquotaprofile.go index d61158334..134ea46a9 100644 --- a/pkg/client/clientset/versioned/typed/quota/v1alpha1/fake/fake_elasticquotaprofile.go +++ b/pkg/client/clientset/versioned/typed/quota/v1alpha1/fake/fake_elasticquotaprofile.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/koordinator-sh/koordinator/apis/quota/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeElasticQuotaProfiles struct { ns string } -var elasticquotaprofilesResource = schema.GroupVersionResource{Group: "quota.koordinator.sh", Version: "v1alpha1", Resource: "elasticquotaprofiles"} +var elasticquotaprofilesResource = v1alpha1.SchemeGroupVersion.WithResource("elasticquotaprofiles") -var elasticquotaprofilesKind = schema.GroupVersionKind{Group: "quota.koordinator.sh", Version: "v1alpha1", Kind: "ElasticQuotaProfile"} +var elasticquotaprofilesKind = v1alpha1.SchemeGroupVersion.WithKind("ElasticQuotaProfile") // Get takes name of the elasticQuotaProfile, and returns the corresponding elasticQuotaProfile object, and an error if there is any. 
func (c *FakeElasticQuotaProfiles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ElasticQuotaProfile, err error) { diff --git a/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_device.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_device.go index f9bda741a..f132527d8 100644 --- a/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_device.go +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_device.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/koordinator-sh/koordinator/apis/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeDevices struct { Fake *FakeSchedulingV1alpha1 } -var devicesResource = schema.GroupVersionResource{Group: "scheduling.koordinator.sh", Version: "v1alpha1", Resource: "devices"} +var devicesResource = v1alpha1.SchemeGroupVersion.WithResource("devices") -var devicesKind = schema.GroupVersionKind{Group: "scheduling.koordinator.sh", Version: "v1alpha1", Kind: "Device"} +var devicesKind = v1alpha1.SchemeGroupVersion.WithKind("Device") // Get takes name of the device, and returns the corresponding device object, and an error if there is any. func (c *FakeDevices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Device, err error) { diff --git a/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_podmigrationjob.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_podmigrationjob.go index e50d6caaa..52668f38c 100644 --- a/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_podmigrationjob.go +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_podmigrationjob.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/koordinator-sh/koordinator/apis/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakePodMigrationJobs struct { Fake *FakeSchedulingV1alpha1 } -var podmigrationjobsResource = schema.GroupVersionResource{Group: "scheduling.koordinator.sh", Version: "v1alpha1", Resource: "podmigrationjobs"} +var podmigrationjobsResource = v1alpha1.SchemeGroupVersion.WithResource("podmigrationjobs") -var podmigrationjobsKind = schema.GroupVersionKind{Group: "scheduling.koordinator.sh", Version: "v1alpha1", Kind: "PodMigrationJob"} +var podmigrationjobsKind = v1alpha1.SchemeGroupVersion.WithKind("PodMigrationJob") // Get takes name of the podMigrationJob, and returns the corresponding podMigrationJob object, and an error if there is any. 
func (c *FakePodMigrationJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodMigrationJob, err error) { diff --git a/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_reservation.go b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_reservation.go index cd2552266..c0e01ffa6 100644 --- a/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_reservation.go +++ b/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_reservation.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/koordinator-sh/koordinator/apis/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeReservations struct { Fake *FakeSchedulingV1alpha1 } -var reservationsResource = schema.GroupVersionResource{Group: "scheduling.koordinator.sh", Version: "v1alpha1", Resource: "reservations"} +var reservationsResource = v1alpha1.SchemeGroupVersion.WithResource("reservations") -var reservationsKind = schema.GroupVersionKind{Group: "scheduling.koordinator.sh", Version: "v1alpha1", Kind: "Reservation"} +var reservationsKind = v1alpha1.SchemeGroupVersion.WithKind("Reservation") // Get takes name of the reservation, and returns the corresponding reservation object, and an error if there is any. func (c *FakeReservations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Reservation, err error) { diff --git a/pkg/client/clientset/versioned/typed/slo/v1alpha1/fake/fake_nodemetric.go b/pkg/client/clientset/versioned/typed/slo/v1alpha1/fake/fake_nodemetric.go index 1fe831c86..6834cc74b 100644 --- a/pkg/client/clientset/versioned/typed/slo/v1alpha1/fake/fake_nodemetric.go +++ b/pkg/client/clientset/versioned/typed/slo/v1alpha1/fake/fake_nodemetric.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/koordinator-sh/koordinator/apis/slo/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeNodeMetrics struct { Fake *FakeSloV1alpha1 } -var nodemetricsResource = schema.GroupVersionResource{Group: "slo.koordinator.sh", Version: "v1alpha1", Resource: "nodemetrics"} +var nodemetricsResource = v1alpha1.SchemeGroupVersion.WithResource("nodemetrics") -var nodemetricsKind = schema.GroupVersionKind{Group: "slo.koordinator.sh", Version: "v1alpha1", Kind: "NodeMetric"} +var nodemetricsKind = v1alpha1.SchemeGroupVersion.WithKind("NodeMetric") // Get takes name of the nodeMetric, and returns the corresponding nodeMetric object, and an error if there is any. 
func (c *FakeNodeMetrics) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeMetric, err error) { diff --git a/pkg/client/clientset/versioned/typed/slo/v1alpha1/fake/fake_nodeslo.go b/pkg/client/clientset/versioned/typed/slo/v1alpha1/fake/fake_nodeslo.go index 5b503ac8f..d9a274552 100644 --- a/pkg/client/clientset/versioned/typed/slo/v1alpha1/fake/fake_nodeslo.go +++ b/pkg/client/clientset/versioned/typed/slo/v1alpha1/fake/fake_nodeslo.go @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/koordinator-sh/koordinator/apis/slo/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -35,9 +34,9 @@ type FakeNodeSLOs struct { Fake *FakeSloV1alpha1 } -var nodeslosResource = schema.GroupVersionResource{Group: "slo.koordinator.sh", Version: "v1alpha1", Resource: "nodeslos"} +var nodeslosResource = v1alpha1.SchemeGroupVersion.WithResource("nodeslos") -var nodeslosKind = schema.GroupVersionKind{Group: "slo.koordinator.sh", Version: "v1alpha1", Kind: "NodeSLO"} +var nodeslosKind = v1alpha1.SchemeGroupVersion.WithKind("NodeSLO") // Get takes name of the nodeSLO, and returns the corresponding nodeSLO object, and an error if there is any. func (c *FakeNodeSLOs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeSLO, err error) { diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index db79275e9..35052d3d2 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -51,6 +51,11 @@ type sharedInformerFactory struct { // startedInformers is used for tracking which informers have been started. // This allows Start() to be called multiple times safely. startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool } // WithCustomResyncConfig sets a custom resync period for the specified informer types. @@ -111,20 +116,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy return factory } -// Start initializes all requested informers. func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { f.lock.Lock() defer f.lock.Unlock() + if f.shuttingDown { + return + } + for informerType, informer := range f.informers { if !f.startedInformers[informerType] { - go informer.Run(stopCh) + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() f.startedInformers[informerType] = true } } } -// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. 
+ f.wg.Wait() +} + func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { informers := func() map[reflect.Type]cache.SharedIndexInformer { f.lock.Lock() @@ -146,7 +170,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() @@ -171,11 +195,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal // SharedInformerFactory provides shared informers for resources in all known // API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.Shutdown() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) type SharedInformerFactory interface { internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client.
+ InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + Analysis() analysis.Interface Config() config.Interface Quota() quota.Interface diff --git a/pkg/descheduler/apis/config/validation/validation_pluginargs.go b/pkg/descheduler/apis/config/validation/validation_pluginargs.go index 2b67c8c5c..1d9817f91 100644 --- a/pkg/descheduler/apis/config/validation/validation_pluginargs.go +++ b/pkg/descheduler/apis/config/validation/validation_pluginargs.go @@ -59,7 +59,7 @@ func ValidateMigrationControllerArgs(path *field.Path, args *deschedulerconfig.M } if args.LabelSelector != nil { - allErrs = append(allErrs, metav1validation.ValidateLabelSelector(args.LabelSelector, field.NewPath("labelSelector"))...) + allErrs = append(allErrs, metav1validation.ValidateLabelSelector(args.LabelSelector, metav1validation.LabelSelectorValidationOptions{}, field.NewPath("labelSelector"))...) } // At most one of include/exclude can be set diff --git a/pkg/descheduler/controllers/migration/arbitrator/arbitrator_test.go b/pkg/descheduler/controllers/migration/arbitrator/arbitrator_test.go index d79e5ea6f..acc034f91 100644 --- a/pkg/descheduler/controllers/migration/arbitrator/arbitrator_test.go +++ b/pkg/descheduler/controllers/migration/arbitrator/arbitrator_test.go @@ -311,7 +311,7 @@ func TestAbortJobIfNonRetryablePodFilterFailed(t *testing.T) { scheme := runtime.NewScheme() _ = v1alpha1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + fakeClient := fake.NewClientBuilder().WithStatusSubresource(&v1alpha1.PodMigrationJob{}).WithScheme(scheme).Build() job := &v1alpha1.PodMigrationJob{ ObjectMeta: metav1.ObjectMeta{ @@ -451,7 +451,7 @@ func TestDoOnceArbitrate(t *testing.T) { scheme := runtime.NewScheme() _ = v1alpha1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + fakeClient := fake.NewClientBuilder().WithStatusSubresource(&v1alpha1.PodMigrationJob{}).WithScheme(scheme).Build() jobs := make([]*v1alpha1.PodMigrationJob, testCase.jobNum) podOfJob := map[*v1alpha1.PodMigrationJob]*corev1.Pod{} @@ -638,7 +638,7 @@ func TestUpdateFailedJob(t *testing.T) { scheme := runtime.NewScheme() _ = v1alpha1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(job).Build() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&v1alpha1.PodMigrationJob{}).WithObjects(job).Build() arbitrator := &arbitratorImpl{ waitingCollection: map[types.UID]*v1alpha1.PodMigrationJob{job.UID: job}, client: fakeClient, @@ -687,7 +687,7 @@ func TestEventHandler(t *testing.T) { assert.Nil(t, fakeClient.Create(context.TODO(), job)) assert.False(t, arbitrator.filter.checkJobPassedArbitration(job.UID)) - handler.Create(event.CreateEvent{Object: job}, queue) + handler.Create(context.TODO(), event.CreateEvent{Object: job}, queue) arbitrator.filter.markJobPassedArbitration(job.UID) assert.True(t, arbitrator.filter.checkJobPassedArbitration(job.UID)) @@ -701,17 +701,17 @@ func TestEventHandler(t *testing.T) { } assert.Equal(t, 0, queue.Len()) for _, job := range migratingJobs[:3] { - handler.Delete(event.DeleteEvent{Object: job}, queue) + handler.Delete(context.TODO(), event.DeleteEvent{Object: job}, queue) assert.False(t, arbitrator.filter.checkJobPassedArbitration(job.UID)) } migratingJobs[3].Status.Phase = v1alpha1.PodMigrationJobFailed 
assert.Nil(t, fakeClient.Update(context.TODO(), migratingJobs[3])) - handler.Update(event.UpdateEvent{ObjectNew: migratingJobs[3]}, queue) + handler.Update(context.TODO(), event.UpdateEvent{ObjectNew: migratingJobs[3]}, queue) assert.False(t, arbitrator.filter.checkJobPassedArbitration(migratingJobs[3].UID)) migratingJobs[4].Status.Phase = v1alpha1.PodMigrationJobSucceeded assert.Nil(t, fakeClient.Update(context.TODO(), migratingJobs[4])) - handler.Update(event.UpdateEvent{ObjectNew: migratingJobs[4]}, queue) + handler.Update(context.TODO(), event.UpdateEvent{ObjectNew: migratingJobs[4]}, queue) assert.False(t, arbitrator.filter.checkJobPassedArbitration(migratingJobs[4].UID)) } diff --git a/pkg/descheduler/controllers/migration/arbitrator/filter_test.go b/pkg/descheduler/controllers/migration/arbitrator/filter_test.go index 6e3a337f3..3e097b2d0 100644 --- a/pkg/descheduler/controllers/migration/arbitrator/filter_test.go +++ b/pkg/descheduler/controllers/migration/arbitrator/filter_test.go @@ -32,6 +32,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/clock" "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/koordinator-sh/koordinator/apis/scheduling/v1alpha1" @@ -43,7 +44,18 @@ func TestFilterExistingMigrationJob(t *testing.T) { scheme := runtime.NewScheme() _ = v1alpha1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithStatusSubresource(&v1alpha1.PodMigrationJob{}). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.uid", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{string(pmj.Spec.PodRef.UID)} + }). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.namespacedName", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{pmj.Spec.PodRef.Namespace + "/" + pmj.Spec.PodRef.Name} + }). + Build() a := filter{client: fakeClient, args: &config.MigrationControllerArgs{}, arbitratedPodMigrationJobs: map[types.UID]bool{}} pod := &corev1.Pod{ @@ -152,7 +164,20 @@ func TestFilterMaxMigratingPerNode(t *testing.T) { scheme := runtime.NewScheme() _ = v1alpha1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + fakeClient := fake.NewClientBuilder().WithScheme(scheme). + WithIndex(&corev1.Pod{}, "pod.spec.nodeName", func(object client.Object) []string { + pod := object.(*corev1.Pod) + return []string{pod.Spec.NodeName} + }). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.uid", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{string(pmj.Spec.PodRef.UID)} + }). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.namespacedName", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{pmj.Spec.PodRef.Namespace + "/" + pmj.Spec.PodRef.Name} + }). 
+ Build() a := filter{client: fakeClient, args: &config.MigrationControllerArgs{}, arbitratedPodMigrationJobs: map[types.UID]bool{}} a.args.MaxMigratingPerNode = pointer.Int32(tt.maxMigrating) @@ -295,7 +320,16 @@ func TestFilterMaxMigratingPerNamespace(t *testing.T) { scheme := runtime.NewScheme() _ = v1alpha1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + fakeClient := fake.NewClientBuilder().WithScheme(scheme). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.uid", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{string(pmj.Spec.PodRef.UID)} + }). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.namespace", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{pmj.Spec.PodRef.Namespace} + }). + Build() a := filter{client: fakeClient, args: &config.MigrationControllerArgs{}, arbitratedPodMigrationJobs: map[types.UID]bool{}} a.args.MaxMigratingPerNamespace = pointer.Int32(tt.maxMigrating) @@ -464,7 +498,16 @@ func TestFilterMaxMigratingPerWorkload(t *testing.T) { scheme := runtime.NewScheme() _ = v1alpha1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + fakeClient := fake.NewClientBuilder().WithScheme(scheme). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.uid", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{string(pmj.Spec.PodRef.UID)} + }). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.namespace", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{pmj.Spec.PodRef.Namespace} + }). + Build() intOrString := intstr.FromInt(tt.maxMigrating) maxUnavailable := intstr.FromInt(int(tt.totalReplicas - 1)) @@ -670,7 +713,15 @@ func TestFilterMaxUnavailablePerWorkload(t *testing.T) { scheme := runtime.NewScheme() _ = v1alpha1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + fakeClient := fake.NewClientBuilder().WithScheme(scheme). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.uid", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{string(pmj.Spec.PodRef.UID)} + }). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.namespace", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{pmj.Spec.PodRef.Namespace} + }).Build() a := filter{client: fakeClient, args: &config.MigrationControllerArgs{}, arbitratedPodMigrationJobs: map[types.UID]bool{}} a.args.MaxMigratingPerWorkload = &intOrString a.args.MaxUnavailablePerWorkload = &maxUnavailable diff --git a/pkg/descheduler/controllers/migration/arbitrator/handler.go b/pkg/descheduler/controllers/migration/arbitrator/handler.go index 85c866e87..d27d7e766 100644 --- a/pkg/descheduler/controllers/migration/arbitrator/handler.go +++ b/pkg/descheduler/controllers/migration/arbitrator/handler.go @@ -17,6 +17,8 @@ limitations under the License. 
package arbitrator import ( + "context" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" @@ -43,7 +45,7 @@ func NewHandler(arbitrator Arbitrator, client client.Client) handler.EventHandle } // Create call Arbitrator.Create -func (h *arbitrationHandler) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (h *arbitrationHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) return @@ -53,7 +55,7 @@ func (h *arbitrationHandler) Create(evt event.CreateEvent, q workqueue.RateLimit } // Update implements EventHandler. -func (h *arbitrationHandler) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (h *arbitrationHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { switch { case evt.ObjectNew != nil: q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ @@ -77,7 +79,7 @@ func (h *arbitrationHandler) Update(evt event.UpdateEvent, q workqueue.RateLimit } // Delete implements EventHandler. -func (h *arbitrationHandler) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (h *arbitrationHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) return diff --git a/pkg/descheduler/controllers/migration/arbitrator/sort_test.go b/pkg/descheduler/controllers/migration/arbitrator/sort_test.go index 92be542de..f66b9097f 100644 --- a/pkg/descheduler/controllers/migration/arbitrator/sort_test.go +++ b/pkg/descheduler/controllers/migration/arbitrator/sort_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/pointer" @@ -191,7 +192,20 @@ func TestSortJobsByMigratingNum(t *testing.T) { scheme := runtime.NewScheme() _ = v1alpha1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - fakeClient := newFieldIndexFakeClient(fake.NewClientBuilder().WithScheme(scheme).Build()) + fakeClient := newFieldIndexFakeClient(fake.NewClientBuilder().WithScheme(scheme). + WithIndex(&corev1.Pod{}, "pod.ownerRefUID", func(obj client.Object) []string { + pod := obj.(*corev1.Pod) + ownerUID := []string{} + for _, ref := range pod.OwnerReferences { + ownerUID = append(ownerUID, string(ref.UID)) + } + return ownerUID + }). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.uid", func(obj client.Object) []string { + pmj := obj.(*v1alpha1.PodMigrationJob) + return []string{string(pmj.Spec.PodRef.UID)} + }). + Build()) creationTime := time.Now() jobs := make([]*batchv1.Job, testCase.jobNum) @@ -422,7 +436,20 @@ func TestGetMigratingJobNum(t *testing.T) { scheme := runtime.NewScheme() _ = v1alpha1.AddToScheme(scheme) _ = clientgoscheme.AddToScheme(scheme) - fakeClient := newFieldIndexFakeClient(fake.NewClientBuilder().WithScheme(scheme).Build()) + fakeClient := newFieldIndexFakeClient(fake.NewClientBuilder().WithScheme(scheme). 
+ WithIndex(&corev1.Pod{}, "pod.ownerRefUID", func(obj client.Object) []string { + pod := obj.(*corev1.Pod) + ownerUID := []string{} + for _, ref := range pod.OwnerReferences { + ownerUID = append(ownerUID, string(ref.UID)) + } + return ownerUID + }). + WithIndex(&v1alpha1.PodMigrationJob{}, "job.pod.uid", func(obj client.Object) []string { + job := obj.(*v1alpha1.PodMigrationJob) + return []string{string(job.Spec.PodRef.UID)} + }). + Build()) creationTime := time.Now() // create job @@ -493,11 +520,25 @@ func TestGetMigratingJobNum(t *testing.T) { } } +var _ client.Client = &fieldIndexFakeClient{} + type fieldIndexFakeClient struct { c client.Client m map[string]func(obj client.Object) []string } +func (f *fieldIndexFakeClient) SubResource(subResource string) client.SubResourceClient { + return f.c.SubResource(subResource) +} + +func (f *fieldIndexFakeClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return f.c.GroupVersionKindFor(obj) +} + +func (f *fieldIndexFakeClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return f.c.IsObjectNamespaced(obj) +} + func newFieldIndexFakeClient(c client.Client) *fieldIndexFakeClient { m := map[string]func(obj client.Object) []string{ fieldindex.IndexPodByNodeName: func(obj client.Object) []string { @@ -554,7 +595,7 @@ func newFieldIndexFakeClient(c client.Client) *fieldIndexFakeClient { } } -func (f *fieldIndexFakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { +func (f *fieldIndexFakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { return f.c.Get(ctx, key, obj) } diff --git a/pkg/descheduler/controllers/migration/controller.go b/pkg/descheduler/controllers/migration/controller.go index 7844c40df..fd141925b 100644 --- a/pkg/descheduler/controllers/migration/controller.go +++ b/pkg/descheduler/controllers/migration/controller.go @@ -27,11 +27,11 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/tools/events" "k8s.io/klog/v2" k8spodutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/utils/clock" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" @@ -110,7 +110,7 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) r.arbitrator = a arbitrationEventHandler := arbitrator.NewHandler(a, r.Client) - if err = c.Watch(&source.Kind{Type: &sev1alpha1.PodMigrationJob{}}, arbitrationEventHandler, &predicate.Funcs{ + if err = c.Watch(source.Kind(options.Manager.GetCache(), &sev1alpha1.PodMigrationJob{}), arbitrationEventHandler, &predicate.Funcs{ DeleteFunc: func(event event.DeleteEvent) bool { job := event.Object.(*sev1alpha1.PodMigrationJob) r.assumedCache.delete(job) @@ -122,7 +122,7 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) }}); err != nil { return nil, err } - if err = c.Watch(&source.Kind{Type: r.reservationInterpreter.GetReservationType()}, &handler.Funcs{}); err != nil { + if err = c.Watch(source.Kind(options.Manager.GetCache(), r.reservationInterpreter.GetReservationType()), &handler.Funcs{}); err != nil { return nil, err } return r, nil diff --git a/pkg/descheduler/controllers/migration/controller_test.go b/pkg/descheduler/controllers/migration/controller_test.go index c94b213db..a7c54d64a 100644 --- 
a/pkg/descheduler/controllers/migration/controller_test.go +++ b/pkg/descheduler/controllers/migration/controller_test.go @@ -30,10 +30,11 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/uuid" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" + "k8s.io/utils/clock" + fakeclock "k8s.io/utils/clock/testing" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -104,7 +105,8 @@ func newTestReconciler() *Reconciler { panic(err) } - runtimeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + runtimeClient := fake.NewClientBuilder(). + WithStatusSubresource(&sev1alpha1.PodMigrationJob{}).WithScheme(scheme).Build() eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(scheme, corev1.EventSource{Component: Name}) @@ -158,7 +160,7 @@ func TestAbortJobIfTimeout(t *testing.T) { assert.False(t, timeout) assert.Nil(t, err) - reconciler.clock = clock.NewFakeClock(time.Now().Add(60 * time.Minute)) + reconciler.clock = fakeclock.NewFakeClock(time.Now().Add(60 * time.Minute)) timeout, err = reconciler.abortJobIfTimeout(context.TODO(), job) assert.True(t, timeout) assert.Nil(t, err) @@ -1365,7 +1367,7 @@ func TestDoScavenge(t *testing.T) { } assert.Nil(t, reconciler.Client.Create(context.TODO(), mustScavengeJob)) } - reconciler.clock = clock.NewFakeClock(time.Now().Add(20 * time.Minute)) + reconciler.clock = fakeclock.NewFakeClock(time.Now().Add(20 * time.Minute)) stopCh := make(chan struct{}) close(stopCh) reconciler.scavenger(stopCh) diff --git a/pkg/descheduler/controllers/migration/controllerfinder/pods_finder_test.go b/pkg/descheduler/controllers/migration/controllerfinder/pods_finder_test.go index 2d61d6630..6d6c49ffa 100644 --- a/pkg/descheduler/controllers/migration/controllerfinder/pods_finder_test.go +++ b/pkg/descheduler/controllers/migration/controllerfinder/pods_finder_test.go @@ -16,6 +16,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" sev1alpha1 "github.com/koordinator-sh/koordinator/apis/scheduling/v1alpha1" @@ -63,7 +64,15 @@ func TestControllerFinder_GetPodsForRef(t *testing.T) { _ = appsv1alpha1.AddToScheme(scheme) _ = appsv1beta1.AddToScheme(scheme) _ = kruisepolicyv1alpha1.AddToScheme(scheme) - runtimeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + runtimeClient := fake.NewClientBuilder().WithScheme(scheme).
+ WithIndex(&corev1.Pod{}, "pod.ownerRefUID", func(obj client.Object) []string { + pod := obj.(*corev1.Pod) + ownerUID := []string{} + for _, ref := range pod.OwnerReferences { + ownerUID = append(ownerUID, string(ref.UID)) + } + return ownerUID + }).Build() deployment := &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ Kind: ControllerKindDep.Kind, diff --git a/pkg/descheduler/informers/factory.go b/pkg/descheduler/informers/factory.go index 0fa468ac5..491583506 100644 --- a/pkg/descheduler/informers/factory.go +++ b/pkg/descheduler/informers/factory.go @@ -42,6 +42,7 @@ import ( "k8s.io/client-go/informers/node" "k8s.io/client-go/informers/policy" "k8s.io/client-go/informers/rbac" + "k8s.io/client-go/informers/resource" "k8s.io/client-go/informers/scheduling" "k8s.io/client-go/informers/storage" "k8s.io/client-go/tools/cache" @@ -151,6 +152,9 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal return sharedIndexerInformer } +func (f *sharedInformerFactory) Shutdown() { +} + func (f *sharedInformerFactory) Admissionregistration() admissionregistration.Interface { return admissionregistration.New(f, f.namespace, f.tweakListOptions) } @@ -215,6 +219,10 @@ func (f *sharedInformerFactory) Rbac() rbac.Interface { return rbac.New(f, f.namespace, f.tweakListOptions) } +func (f *sharedInformerFactory) Resource() resource.Interface { + return resource.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Scheduling() scheduling.Interface { return scheduling.New(f, f.namespace, f.tweakListOptions) } diff --git a/pkg/descheduler/informers/generic.go b/pkg/descheduler/informers/generic.go index f77d205d0..a6d117708 100644 --- a/pkg/descheduler/informers/generic.go +++ b/pkg/descheduler/informers/generic.go @@ -26,11 +26,13 @@ import ( appsv1beta1 "k8s.io/api/apps/v1beta1" "k8s.io/api/apps/v1beta2" autoscalingv1 "k8s.io/api/autoscaling/v1" + v2 "k8s.io/api/autoscaling/v2" "k8s.io/api/autoscaling/v2beta1" "k8s.io/api/autoscaling/v2beta2" batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" certificatesv1 "k8s.io/api/certificates/v1" + certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" @@ -41,8 +43,12 @@ import ( eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/api/flowcontrol/v1alpha1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + v1beta3 "k8s.io/api/flowcontrol/v1beta3" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -52,6 +58,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" + v1alpha2 "k8s.io/api/resource/v1alpha2" schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" @@ -88,9 +95,19 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1().ValidatingWebhookConfigurations().Informer()}, nil + 
// Group=admissionregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().ValidatingAdmissionPolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().ValidatingAdmissionPolicyBindings().Informer()}, nil + // Group=admissionregistration.k8s.io, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().MutatingWebhookConfigurations().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingAdmissionPolicies().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingAdmissionPolicyBindings().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations().Informer()}, nil @@ -130,6 +147,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case autoscalingv1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1().HorizontalPodAutoscalers().Informer()}, nil + // Group=autoscaling, Version=v2 + case v2.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2().HorizontalPodAutoscalers().Informer()}, nil + // Group=autoscaling, Version=v2beta1 case v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2beta1().HorizontalPodAutoscalers().Informer()}, nil @@ -152,6 +173,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests"): return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1().CertificateSigningRequests().Informer()}, nil + // Group=certificates.k8s.io, Version=v1alpha1 + case certificatesv1alpha1.SchemeGroupVersion.WithResource("clustertrustbundles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1alpha1().ClusterTrustBundles().Informer()}, nil + // Group=certificates.k8s.io, Version=v1beta1 case certificatesv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests"): return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1beta1().CertificateSigningRequests().Informer()}, nil @@ -223,15 +248,13 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Ingresses().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("networkpolicies"): return 
&genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().NetworkPolicies().Informer()}, nil - case extensionsv1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().PodSecurityPolicies().Informer()}, nil case extensionsv1beta1.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil // Group=flowcontrol.apiserver.k8s.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("flowschemas"): + case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("flowschemas"): return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().FlowSchemas().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().PriorityLevelConfigurations().Informer()}, nil // Group=flowcontrol.apiserver.k8s.io, Version=v1beta1 @@ -240,6 +263,18 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case flowcontrolv1beta1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta1().PriorityLevelConfigurations().Informer()}, nil + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta2 + case flowcontrolv1beta2.SchemeGroupVersion.WithResource("flowschemas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta2().FlowSchemas().Informer()}, nil + case flowcontrolv1beta2.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta2().PriorityLevelConfigurations().Informer()}, nil + + // Group=flowcontrol.apiserver.k8s.io, Version=v1beta3 + case v1beta3.SchemeGroupVersion.WithResource("flowschemas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta3().FlowSchemas().Informer()}, nil + case v1beta3.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1beta3().PriorityLevelConfigurations().Informer()}, nil + // Group=internal.apiserver.k8s.io, Version=v1alpha1 case apiserverinternalv1alpha1.SchemeGroupVersion.WithResource("storageversions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Internal().V1alpha1().StorageVersions().Informer()}, nil @@ -252,6 +287,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case networkingv1.SchemeGroupVersion.WithResource("networkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil + // Group=networking.k8s.io, Version=v1alpha1 + case networkingv1alpha1.SchemeGroupVersion.WithResource("clustercidrs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ClusterCIDRs().Informer()}, nil + case networkingv1alpha1.SchemeGroupVersion.WithResource("ipaddresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().IPAddresses().Informer()}, nil + 
// Group=networking.k8s.io, Version=v1beta1 case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil @@ -310,6 +351,16 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case rbacv1beta1.SchemeGroupVersion.WithResource("rolebindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil + // Group=resource.k8s.io, Version=v1alpha2 + case v1alpha2.SchemeGroupVersion.WithResource("podschedulingcontexts"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().PodSchedulingContexts().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("resourceclaims"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaims().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("resourceclaimtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClaimTemplates().Informer()}, nil + case v1alpha2.SchemeGroupVersion.WithResource("resourceclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha2().ResourceClasses().Informer()}, nil + // Group=scheduling.k8s.io, Version=v1 case schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1().PriorityClasses().Informer()}, nil @@ -327,6 +378,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().CSIDrivers().Informer()}, nil case storagev1.SchemeGroupVersion.WithResource("csinodes"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().CSINodes().Informer()}, nil + case storagev1.SchemeGroupVersion.WithResource("csistoragecapacities"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().CSIStorageCapacities().Informer()}, nil case storagev1.SchemeGroupVersion.WithResource("storageclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().StorageClasses().Informer()}, nil case storagev1.SchemeGroupVersion.WithResource("volumeattachments"): diff --git a/pkg/descheduler/node/node.go b/pkg/descheduler/node/node.go index b850a06cd..3d577c23c 100644 --- a/pkg/descheduler/node/node.go +++ b/pkg/descheduler/node/node.go @@ -201,7 +201,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *corev1.Pod, var insufficientResources []error // Get pod requests - podRequests, _ := resourcehelper.PodRequestsAndLimits(pod) + podRequests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{}) resourceNames := make([]corev1.ResourceName, 0, len(podRequests)) for name := range podRequests { resourceNames = append(resourceNames, name) @@ -264,7 +264,7 @@ func NodeUtilization(pods []*corev1.Pod, resourceNames []corev1.ResourceName) ma } for _, pod := range pods { - req, _ := resourcehelper.PodRequestsAndLimits(pod) + req := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{}) for _, name := range resourceNames { quantity, ok := req[name] if ok && name != corev1.ResourcePods { diff --git a/pkg/features/scheduler_features.go b/pkg/features/scheduler_features.go 
index 40fe3c1ae..10c05cae3 100644 --- a/pkg/features/scheduler_features.go +++ b/pkg/features/scheduler_features.go @@ -57,6 +57,12 @@ const ( // // ResizePod is used to enable resize pod feature ResizePod featuregate.Feature = "ResizePod" + + CSIStorageCapacity featuregate.Feature = "CSIStorageCapacity" + + GenericEphemeralVolume featuregate.Feature = "GenericEphemeralVolume" + + PodDisruptionBudget featuregate.Feature = "PodDisruptionBudget" ) var defaultSchedulerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ @@ -70,6 +76,9 @@ var defaultSchedulerFeatureGates = map[featuregate.Feature]featuregate.FeatureSp ElasticQuotaGuaranteeUsage: {Default: false, PreRelease: featuregate.Alpha}, DisableDefaultQuota: {Default: false, PreRelease: featuregate.Alpha}, SupportParentQuotaSubmitPod: {Default: false, PreRelease: featuregate.Alpha}, + CSIStorageCapacity: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26 + GenericEphemeralVolume: {Default: true, PreRelease: featuregate.GA}, + PodDisruptionBudget: {Default: true, PreRelease: featuregate.GA}, } func init() { diff --git a/pkg/koordlet/prediction/predict_server.go b/pkg/koordlet/prediction/predict_server.go index 2c5d3694c..d3f440e1d 100644 --- a/pkg/koordlet/prediction/predict_server.go +++ b/pkg/koordlet/prediction/predict_server.go @@ -27,10 +27,10 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" + "k8s.io/utils/clock" "github.com/koordinator-sh/koordinator/apis/extension" "github.com/koordinator-sh/koordinator/pkg/koordlet/metriccache" diff --git a/pkg/koordlet/prediction/predict_server_test.go b/pkg/koordlet/prediction/predict_server_test.go index 40487a936..830cf9fa3 100644 --- a/pkg/koordlet/prediction/predict_server_test.go +++ b/pkg/koordlet/prediction/predict_server_test.go @@ -28,7 +28,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" "github.com/koordinator-sh/koordinator/pkg/util/histogram" ) diff --git a/pkg/koordlet/qosmanager/framework/context.go b/pkg/koordlet/qosmanager/framework/context.go index f73c49c98..e4d86e9a1 100644 --- a/pkg/koordlet/qosmanager/framework/context.go +++ b/pkg/koordlet/qosmanager/framework/context.go @@ -66,6 +66,14 @@ func (r *Evictor) EvictPodsIfNotEvicted(evictPods []*corev1.Pod, node *corev1.No } } +func (r *Evictor) IsPodEvicted(pod *corev1.Pod) bool { + if pod == nil { + return false + } + _, evicted := r.podsEvicted.Get(string(pod.UID)) + return evicted +} + func (r *Evictor) evictPodIfNotEvicted(evictPod *corev1.Pod, node *corev1.Node, reason string, message string) { _, evicted := r.podsEvicted.Get(string(evictPod.UID)) if evicted { diff --git a/pkg/koordlet/qosmanager/helpers/operation.go b/pkg/koordlet/qosmanager/helpers/operation.go index f1067f621..915e62186 100644 --- a/pkg/koordlet/qosmanager/helpers/operation.go +++ b/pkg/koordlet/qosmanager/helpers/operation.go @@ -17,6 +17,8 @@ limitations under the License. package helpers import ( + "context" + corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" @@ -44,7 +46,7 @@ func KillContainers(pod *corev1.Pod, message string) { klog.Errorf("%s, kill container(%s) error! GetRuntimeHandler fail! 
error: %v", message, containerStatus.ContainerID, err) continue } - if err := runtimeHandler.StopContainer(containerID, 0); err != nil { + if err := runtimeHandler.StopContainer(context.Background(), containerID, 0); err != nil { klog.Errorf("%s, stop container error! error: %v", message, err) } } else { diff --git a/pkg/koordlet/qosmanager/plugins/cpuevict/cpu_evict_test.go b/pkg/koordlet/qosmanager/plugins/cpuevict/cpu_evict_test.go index 717f72345..da3013683 100644 --- a/pkg/koordlet/qosmanager/plugins/cpuevict/cpu_evict_test.go +++ b/pkg/koordlet/qosmanager/plugins/cpuevict/cpu_evict_test.go @@ -574,17 +574,12 @@ func Test_killAndEvictBEPodsRelease(t *testing.T) { cpuEvictor.killAndEvictBEPodsRelease(node, podEvictInfosSorted, 18*1000) - getEvictObject, err := client.Tracker().Get(testutil.PodsResource, podEvictInfosSorted[0].pod.Namespace, podEvictInfosSorted[0].pod.Name) - assert.NotNil(t, getEvictObject, "evictPod Fail, err: %v", err) - assert.IsType(t, &policyv1beta1.Eviction{}, getEvictObject, "evictPod: %s Fail", podEvictInfosSorted[0].pod.Name) + // evict subresource will not be creat or update in client go testing, check evict object + // https://github.com/kubernetes/client-go/blob/v0.28.7/testing/fixture.go#L117 + assert.True(t, cpuEvictor.evictor.IsPodEvicted(podEvictInfosSorted[0].pod)) + assert.True(t, cpuEvictor.evictor.IsPodEvicted(podEvictInfosSorted[1].pod)) + assert.False(t, cpuEvictor.evictor.IsPodEvicted(podEvictInfosSorted[2].pod)) - getEvictObject, err = client.Tracker().Get(testutil.PodsResource, podEvictInfosSorted[1].pod.Namespace, podEvictInfosSorted[1].pod.Name) - assert.NotNil(t, getEvictObject, "evictPod Fail, err: %v", err) - assert.IsType(t, &policyv1beta1.Eviction{}, getEvictObject, "evictPod: %s Fail", podEvictInfosSorted[1].pod.Name) - - getNotEvictObject, err := client.Tracker().Get(testutil.PodsResource, podEvictInfosSorted[2].pod.Namespace, podEvictInfosSorted[2].pod.Name) - assert.Nil(t, err, "get pod fail", err) - assert.IsType(t, &corev1.Pod{}, getNotEvictObject, "no need evict", podEvictInfosSorted[2].pod.Name) assert.True(t, cpuEvictor.lastEvictTime.After(time.Now().Add(-5*time.Second)), "checkLastTime") } diff --git a/pkg/koordlet/qosmanager/plugins/memoryevict/memory_evict_test.go b/pkg/koordlet/qosmanager/plugins/memoryevict/memory_evict_test.go index e914c2202..623d490b7 100644 --- a/pkg/koordlet/qosmanager/plugins/memoryevict/memory_evict_test.go +++ b/pkg/koordlet/qosmanager/plugins/memoryevict/memory_evict_test.go @@ -410,17 +410,14 @@ func Test_memoryEvict(t *testing.T) { memoryEvictor.lastEvictTime = time.Now().Add(-30 * time.Second) memoryEvictor.memoryEvict() + // evict subresource will not be creat or update in client go testing, check evict object + // https://github.com/kubernetes/client-go/blob/v0.28.7/testing/fixture.go#L117 for _, pod := range tt.expectEvictPods { - getEvictObject, err := client.Tracker().Get(testutil.PodsResource, pod.Namespace, pod.Name) - assert.NotNil(t, getEvictObject, "evictPod Fail", err) - assert.IsType(t, &policyv1beta1.Eviction{}, getEvictObject, "evictPod Fail", pod.Name) + assert.True(t, memoryEvictor.evictor.IsPodEvicted(pod)) } for _, pod := range tt.expectNotEvictPods { - getObject, _ := client.Tracker().Get(testutil.PodsResource, pod.Namespace, pod.Name) - assert.IsType(t, &corev1.Pod{}, getObject, "no need evict", pod.Name) - gotPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) - assert.NotNil(t, gotPod, "no need evict!", err) + assert.False(t, 
memoryEvictor.evictor.IsPodEvicted(pod)) } }) } diff --git a/pkg/koordlet/runtimehooks/protocol/protocol.go b/pkg/koordlet/runtimehooks/protocol/protocol.go index 0688e2bfe..6950ba6c7 100644 --- a/pkg/koordlet/runtimehooks/protocol/protocol.go +++ b/pkg/koordlet/runtimehooks/protocol/protocol.go @@ -88,7 +88,8 @@ func (r *Resources) IsOriginResSet() bool { } func (r *Resources) FromPod(pod *corev1.Pod) { - requests, limits := resource.PodRequestsAndLimits(pod) + requests := resource.PodRequests(pod, resource.PodResourcesOptions{}) + limits := resource.PodLimits(pod, resource.PodResourcesOptions{}) cpuShares := sysutil.MilliCPUToShares(requests.Cpu().MilliValue()) cfsQuota := sysutil.MilliCPUToQuota(limits.Cpu().MilliValue()) memoryLimit := limits.Memory().Value() diff --git a/pkg/koordlet/util/kubelet/cpu_assignment.go b/pkg/koordlet/util/kubelet/cpu_assignment.go index 4516808b4..51bb1533c 100644 --- a/pkg/koordlet/util/kubelet/cpu_assignment.go +++ b/pkg/koordlet/util/kubelet/cpu_assignment.go @@ -22,7 +22,7 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" + "k8s.io/utils/cpuset" ) type LoopControl int @@ -97,7 +97,7 @@ func (n *numaFirst) takeFullSecondLevel() { // If NUMA nodes are higher in the memory hierarchy than sockets, then just // sort the NUMA nodes directly, and return them. func (n *numaFirst) sortAvailableNUMANodes() []int { - numas := n.acc.details.NUMANodes().ToSliceNoSort() + numas := n.acc.details.NUMANodes().UnsortedList() n.acc.sort(numas, n.acc.details.CPUsInNUMANodes) return numas } @@ -108,7 +108,7 @@ func (n *numaFirst) sortAvailableNUMANodes() []int { func (n *numaFirst) sortAvailableSockets() []int { var result []int for _, numa := range n.sortAvailableNUMANodes() { - sockets := n.acc.details.SocketsInNUMANodes(numa).ToSliceNoSort() + sockets := n.acc.details.SocketsInNUMANodes(numa).UnsortedList() n.acc.sort(sockets, n.acc.details.CPUsInSockets) result = append(result, sockets...) } @@ -120,7 +120,7 @@ func (n *numaFirst) sortAvailableSockets() []int { func (n *numaFirst) sortAvailableCores() []int { var result []int for _, socket := range n.acc.sortAvailableSockets() { - cores := n.acc.details.CoresInSockets(socket).ToSliceNoSort() + cores := n.acc.details.CoresInSockets(socket).UnsortedList() n.acc.sort(cores, n.acc.details.CPUsInCores) result = append(result, cores...) } @@ -145,7 +145,7 @@ func (s *socketsFirst) takeFullSecondLevel() { func (s *socketsFirst) sortAvailableNUMANodes() []int { var result []int for _, socket := range s.sortAvailableSockets() { - numas := s.acc.details.NUMANodesInSockets(socket).ToSliceNoSort() + numas := s.acc.details.NUMANodesInSockets(socket).UnsortedList() s.acc.sort(numas, s.acc.details.CPUsInNUMANodes) result = append(result, numas...) } @@ -155,7 +155,7 @@ func (s *socketsFirst) sortAvailableNUMANodes() []int { // If sockets are higher in the memory hierarchy than NUMA nodes, then just // sort the sockets directly, and return them. 
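// [Editor's note: illustrative sketch, not part of the patch.] The descheduler node.go
// and runtimehooks protocol.go hunks above replace the removed
// resourcehelper.PodRequestsAndLimits helper with the split PodRequests/PodLimits
// helpers from the Kubernetes 1.28 vendoring. A minimal usage sketch, assuming the
// helpers live in k8s.io/kubernetes/pkg/api/v1/resource behind the diff's aliases:
package example

import (
	corev1 "k8s.io/api/core/v1"
	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
)

// podCPURequestAndLimit returns the pod-level CPU request and limit in millicores.
func podCPURequestAndLimit(pod *corev1.Pod) (requestMilli, limitMilli int64) {
	// PodResourcesOptions{} keeps the default aggregation behavior.
	requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})
	limits := resourcehelper.PodLimits(pod, resourcehelper.PodResourcesOptions{})
	return requests.Cpu().MilliValue(), limits.Cpu().MilliValue()
}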
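// [Editor's note: illustrative sketch, not part of the patch.] The cpu_assignment
// changes in this file follow the kubelet's migration from
// k8s.io/kubernetes/pkg/kubelet/cm/cpuset to k8s.io/utils/cpuset:
// cpuset.NewCPUSet becomes cpuset.New, and ToSliceNoSort becomes UnsortedList.
// A minimal sketch of the replacement API:
package example

import "k8s.io/utils/cpuset"

func cpusetMigrationExample() []int {
	// cpuset.New replaces cpuset.NewCPUSet.
	available := cpuset.New(0, 1, 2, 3, 4, 5, 6, 7)
	// UnsortedList replaces ToSliceNoSort; List returns the CPUs sorted.
	_ = available.List()
	return available.UnsortedList()
}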
func (s *socketsFirst) sortAvailableSockets() []int { - sockets := s.acc.details.Sockets().ToSliceNoSort() + sockets := s.acc.details.Sockets().UnsortedList() s.acc.sort(sockets, s.acc.details.CPUsInSockets) return sockets } @@ -165,7 +165,7 @@ func (s *socketsFirst) sortAvailableSockets() []int { func (s *socketsFirst) sortAvailableCores() []int { var result []int for _, numa := range s.acc.sortAvailableNUMANodes() { - cores := s.acc.details.CoresInNUMANodes(numa).ToSliceNoSort() + cores := s.acc.details.CoresInNUMANodes(numa).UnsortedList() s.acc.sort(cores, s.acc.details.CPUsInCores) result = append(result, cores...) } @@ -185,7 +185,7 @@ func newCPUAccumulator(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, topo: topo, details: topo.CPUDetails.KeepOnly(availableCPUs), numCPUsNeeded: numCPUs, - result: cpuset.NewCPUSet(), + result: cpuset.New(), } // if topo.NumSockets >= topo.NumNUMANodes { @@ -293,7 +293,7 @@ func (a *cpuAccumulator) sortAvailableCores() []int { func (a *cpuAccumulator) sortAvailableCPUs() []int { var result []int for _, core := range a.sortAvailableCores() { - cpus := a.details.CPUsInCores(core).ToSliceNoSort() + cpus := a.details.CPUsInCores(core).UnsortedList() sort.Ints(cpus) result = append(result, cpus...) } @@ -342,7 +342,7 @@ func (a *cpuAccumulator) takeFullCores() { func (a *cpuAccumulator) takeRemainingCPUs() { for _, cpu := range a.sortAvailableCPUs() { klog.V(4).InfoS("takeRemainingCPUs: claiming CPU", "cpu", cpu) - a.take(cpuset.NewCPUSet(cpu)) + a.take(cpuset.New(cpu)) if a.isSatisfied() { return } @@ -423,7 +423,7 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C return acc.result, nil } if acc.isFailed() { - return cpuset.NewCPUSet(), fmt.Errorf("not enough cpus available to satisfy request") + return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request") } // Algorithm: topology-aware best-fit @@ -455,7 +455,7 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C return acc.result, nil } - return cpuset.NewCPUSet(), fmt.Errorf("failed to allocate cpus") + return cpuset.New(), fmt.Errorf("failed to allocate cpus") } func TakeByTopology(availableCPUs cpuset.CPUSet, numCPUs int, cpuTopology *topology.CPUTopology) (cpuset.CPUSet, error) { diff --git a/pkg/koordlet/util/kubelet/cpu_assignment_test.go b/pkg/koordlet/util/kubelet/cpu_assignment_test.go index 395fb23d9..9e1377a25 100644 --- a/pkg/koordlet/util/kubelet/cpu_assignment_test.go +++ b/pkg/koordlet/util/kubelet/cpu_assignment_test.go @@ -22,7 +22,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" + "k8s.io/utils/cpuset" ) func TestCPUAccumulatorFreeSockets(t *testing.T) { @@ -35,31 +35,31 @@ func TestCPUAccumulatorFreeSockets(t *testing.T) { { "single socket HT, 1 socket free", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), []int{0}, }, { "single socket HT, 0 sockets free", topoSingleSocketHT, - cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7), + cpuset.New(1, 2, 3, 4, 5, 6, 7), []int{}, }, { "dual socket HT, 2 sockets free", topoDualSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), []int{0, 1}, }, { "dual socket HT, 1 socket free", topoDualSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11), []int{1}, }, { "dual socket HT, 0 sockets free", topoDualSocketHT, - 
cpuset.NewCPUSet(0, 2, 3, 4, 5, 6, 7, 8, 9, 11), + cpuset.New(0, 2, 3, 4, 5, 6, 7, 8, 9, 11), []int{}, }, { @@ -188,49 +188,49 @@ func TestCPUAccumulatorFreeCores(t *testing.T) { { "single socket HT, 4 cores free", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), []int{0, 1, 2, 3}, }, { "single socket HT, 3 cores free", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 4, 5, 6), + cpuset.New(0, 1, 2, 4, 5, 6), []int{0, 1, 2}, }, { "single socket HT, 3 cores free (1 partially consumed)", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6), + cpuset.New(0, 1, 2, 3, 4, 5, 6), []int{0, 1, 2}, }, { "single socket HT, 0 cores free", topoSingleSocketHT, - cpuset.NewCPUSet(), + cpuset.New(), []int{}, }, { "single socket HT, 0 cores free (4 partially consumed)", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3), + cpuset.New(0, 1, 2, 3), []int{}, }, { "dual socket HT, 6 cores free", topoDualSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), []int{0, 2, 4, 1, 3, 5}, }, { "dual socket HT, 5 cores free (1 consumed from socket 0)", topoDualSocketHT, - cpuset.NewCPUSet(2, 1, 3, 4, 5, 7, 8, 9, 10, 11), + cpuset.New(2, 1, 3, 4, 5, 7, 8, 9, 10, 11), []int{2, 4, 1, 3, 5}, }, { "dual socket HT, 4 cores free (1 consumed from each socket)", topoDualSocketHT, - cpuset.NewCPUSet(2, 3, 4, 5, 8, 9, 10, 11), + cpuset.New(2, 3, 4, 5, 8, 9, 10, 11), []int{2, 4, 3, 5}, }, } @@ -256,37 +256,37 @@ func TestCPUAccumulatorFreeCPUs(t *testing.T) { { "single socket HT, 8 cpus free", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), []int{0, 4, 1, 5, 2, 6, 3, 7}, }, { "single socket HT, 5 cpus free", topoSingleSocketHT, - cpuset.NewCPUSet(3, 4, 5, 6, 7), + cpuset.New(3, 4, 5, 6, 7), []int{4, 5, 6, 3, 7}, }, { "dual socket HT, 12 cpus free", topoDualSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), []int{0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11}, }, { "dual socket HT, 11 cpus free", topoDualSocketHT, - cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), + cpuset.New(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), []int{6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11}, }, { "dual socket HT, 10 cpus free", topoDualSocketHT, - cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11), + cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11), []int{2, 8, 4, 10, 1, 7, 3, 9, 5, 11}, }, { "triple socket HT, 12 cpus free", topoTripleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13), + cpuset.New(0, 1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13), []int{12, 13, 0, 1, 2, 3, 6, 7, 8, 9, 10, 11}, }, } @@ -315,8 +315,8 @@ func TestCPUAccumulatorTake(t *testing.T) { { "take 0 cpus from a single socket HT, require 1", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), - []cpuset.CPUSet{cpuset.NewCPUSet()}, + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + []cpuset.CPUSet{cpuset.New()}, 1, false, false, @@ -324,8 +324,8 @@ func TestCPUAccumulatorTake(t *testing.T) { { "take 0 cpus from a single socket HT, require 1, none available", topoSingleSocketHT, - cpuset.NewCPUSet(), - []cpuset.CPUSet{cpuset.NewCPUSet()}, + cpuset.New(), + []cpuset.CPUSet{cpuset.New()}, 1, false, true, @@ -333,8 +333,8 @@ func TestCPUAccumulatorTake(t *testing.T) { { "take 1 cpu from a single socket HT, require 1", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), - []cpuset.CPUSet{cpuset.NewCPUSet(0)}, + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + 
[]cpuset.CPUSet{cpuset.New(0)}, 1, true, false, @@ -342,8 +342,8 @@ func TestCPUAccumulatorTake(t *testing.T) { { "take 1 cpu from a single socket HT, require 2", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), - []cpuset.CPUSet{cpuset.NewCPUSet(0)}, + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), + []cpuset.CPUSet{cpuset.New(0)}, 2, false, false, @@ -351,8 +351,8 @@ func TestCPUAccumulatorTake(t *testing.T) { { "take 2 cpu from a single socket HT, require 4, expect failed", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2), - []cpuset.CPUSet{cpuset.NewCPUSet(0), cpuset.NewCPUSet(1)}, + cpuset.New(0, 1, 2), + []cpuset.CPUSet{cpuset.New(0), cpuset.New(1)}, 4, false, true, @@ -360,16 +360,16 @@ func TestCPUAccumulatorTake(t *testing.T) { { "take all cpus one at a time from a single socket HT, require 8", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), []cpuset.CPUSet{ - cpuset.NewCPUSet(0), - cpuset.NewCPUSet(1), - cpuset.NewCPUSet(2), - cpuset.NewCPUSet(3), - cpuset.NewCPUSet(4), - cpuset.NewCPUSet(5), - cpuset.NewCPUSet(6), - cpuset.NewCPUSet(7), + cpuset.New(0), + cpuset.New(1), + cpuset.New(2), + cpuset.New(3), + cpuset.New(4), + cpuset.New(5), + cpuset.New(6), + cpuset.New(7), }, 8, true, @@ -422,66 +422,66 @@ func commonTakeByTopologyTestCases(t *testing.T) []takeByTopologyTestCase { { "take more cpus than are available from single socket with HT", topoSingleSocketHT, - cpuset.NewCPUSet(0, 2, 4, 6), + cpuset.New(0, 2, 4, 6), 5, "not enough cpus available to satisfy request", - cpuset.NewCPUSet(), + cpuset.New(), }, { "take zero cpus from single socket with HT", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), 0, "", - cpuset.NewCPUSet(), + cpuset.New(), }, { "take one cpu from single socket with HT", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), 1, "", - cpuset.NewCPUSet(0), + cpuset.New(0), }, { "take one cpu from single socket with HT, some cpus are taken", topoSingleSocketHT, - cpuset.NewCPUSet(1, 3, 5, 6, 7), + cpuset.New(1, 3, 5, 6, 7), 1, "", - cpuset.NewCPUSet(6), + cpuset.New(6), }, { "take two cpus from single socket with HT", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), 2, "", - cpuset.NewCPUSet(0, 4), + cpuset.New(0, 4), }, { "take all cpus from single socket with HT", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), 8, "", - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7), }, { "take two cpus from single socket with HT, only one core totally free", topoSingleSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 6), + cpuset.New(0, 1, 2, 3, 6), 2, "", - cpuset.NewCPUSet(2, 6), + cpuset.New(2, 6), }, { "take a socket of cpus from dual socket with HT", topoDualSocketHT, - cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), + cpuset.New(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), 6, "", - cpuset.NewCPUSet(0, 2, 4, 6, 8, 10), + cpuset.New(0, 2, 4, 6, 8, 10), }, { "take a socket of cpus from dual socket with multi-numa-per-socket with HT", @@ -532,10 +532,10 @@ func TestTakeByTopologyNUMAPacked(t *testing.T) { { "take one cpu from dual socket with HT - core from Socket 0", topoDualSocketHT, - cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11), + cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11), 1, "", - cpuset.NewCPUSet(2), + cpuset.New(2), }, { "allocate 4 full cores with 3 coming from the first 
NUMA node (filling it up) and 1 coming from the second NUMA node", diff --git a/pkg/koordlet/util/kubelet/kubelet.go b/pkg/koordlet/util/kubelet/kubelet.go index 1a44950e1..e64d8539f 100644 --- a/pkg/koordlet/util/kubelet/kubelet.go +++ b/pkg/koordlet/util/kubelet/kubelet.go @@ -29,10 +29,10 @@ import ( kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "k8s.io/kubernetes/pkg/kubelet/eviction" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit" + "k8s.io/utils/cpuset" koordletutil "github.com/koordinator-sh/koordinator/pkg/koordlet/util" ) @@ -148,7 +148,7 @@ func GetKubeletReservedOptions(kubeletConfiguration *kubeletconfiginternal.Kubel } func getReservedCPUs(topology *topology.CPUTopology, cpus string) (cpuset.CPUSet, error) { - emptyCPUSet := cpuset.NewCPUSet() + emptyCPUSet := cpuset.New() if cpus == "" { return emptyCPUSet, nil diff --git a/pkg/koordlet/util/runtime/handler/containerd_runtime.go b/pkg/koordlet/util/runtime/handler/containerd_runtime.go index 834704de1..5ddcc8b8f 100644 --- a/pkg/koordlet/util/runtime/handler/containerd_runtime.go +++ b/pkg/koordlet/util/runtime/handler/containerd_runtime.go @@ -68,7 +68,7 @@ func NewContainerdRuntimeHandler(endpoint string) (ContainerRuntimeHandler, erro }, nil } -func (c *ContainerdRuntimeHandler) StopContainer(containerID string, timeout int64) error { +func (c *ContainerdRuntimeHandler) StopContainer(con context.Context, containerID string, timeout int64) error { if containerID == "" { return fmt.Errorf("containerID cannot be empty") } diff --git a/pkg/koordlet/util/runtime/handler/containerd_runtime_test.go b/pkg/koordlet/util/runtime/handler/containerd_runtime_test.go index 03ecf529b..dd6d2f4b6 100644 --- a/pkg/koordlet/util/runtime/handler/containerd_runtime_test.go +++ b/pkg/koordlet/util/runtime/handler/containerd_runtime_test.go @@ -87,7 +87,7 @@ func Test_Containerd_StopContainer(t *testing.T) { mockRuntimeClient.EXPECT().StopContainer(gomock.Any(), gomock.Any()).Return(nil, tt.runtimeError) runtimeHandler := ContainerdRuntimeHandler{runtimeServiceClient: mockRuntimeClient, timeout: 1, endpoint: GetContainerdEndpoint()} - gotErr := runtimeHandler.StopContainer(tt.containerId, 1) + gotErr := runtimeHandler.StopContainer(context.TODO(), tt.containerId, 1) assert.Equal(t, gotErr != nil, tt.expectError) }) diff --git a/pkg/koordlet/util/runtime/handler/crio_runtime.go b/pkg/koordlet/util/runtime/handler/crio_runtime.go index ad275a511..d89a43aef 100644 --- a/pkg/koordlet/util/runtime/handler/crio_runtime.go +++ b/pkg/koordlet/util/runtime/handler/crio_runtime.go @@ -61,12 +61,12 @@ func NewCrioRuntimeHandler(endpoint string) (ContainerRuntimeHandler, error) { }, nil } -func (c *CrioRuntimeHandler) StopContainer(containerID string, timeout int64) error { +func (c *CrioRuntimeHandler) StopContainer(ctx context.Context, containerID string, timeout int64) error { if containerID == "" { return fmt.Errorf("containerID cannot be empty") } t := c.timeout + time.Duration(timeout) - ctx, cancel := context.WithTimeout(context.Background(), t) + ctx, cancel := context.WithTimeout(ctx, t) defer cancel() request := &runtimeapi.StopContainerRequest{ diff --git a/pkg/koordlet/util/runtime/handler/crio_runtime_test.go b/pkg/koordlet/util/runtime/handler/crio_runtime_test.go index edc9c64b8..d5b955a7b 100644 --- 
a/pkg/koordlet/util/runtime/handler/crio_runtime_test.go +++ b/pkg/koordlet/util/runtime/handler/crio_runtime_test.go @@ -87,7 +87,7 @@ func Test_Crio_StopContainer(t *testing.T) { mockRuntimeClient.EXPECT().StopContainer(gomock.Any(), gomock.Any()).Return(nil, tt.runtimeError) runtimeHandler := ContainerdRuntimeHandler{runtimeServiceClient: mockRuntimeClient, timeout: 1, endpoint: GetCrioEndpoint()} - gotErr := runtimeHandler.StopContainer(tt.containerId, 1) + gotErr := runtimeHandler.StopContainer(context.TODO(), tt.containerId, 1) assert.Equal(t, gotErr != nil, tt.expectError) }) diff --git a/pkg/koordlet/util/runtime/handler/docker_runtime.go b/pkg/koordlet/util/runtime/handler/docker_runtime.go index 9cbd99a17..442dcb521 100644 --- a/pkg/koordlet/util/runtime/handler/docker_runtime.go +++ b/pkg/koordlet/util/runtime/handler/docker_runtime.go @@ -75,7 +75,7 @@ func createDockerClient(httpClient *http.Client, endPoint string) (*dclient.Clie return dclient.NewClientWithOpts(dclient.WithHost(endPoint), dclient.WithHTTPClient(httpClient)) } -func (d *DockerRuntimeHandler) StopContainer(containerID string, timeout int64) error { +func (d *DockerRuntimeHandler) StopContainer(c context.Context, containerID string, timeout int64) error { if d == nil || d.dockerClient == nil { return fmt.Errorf("stop container fail! docker client is nil! containerID=%v", containerID) } diff --git a/pkg/koordlet/util/runtime/handler/docker_runtime_test.go b/pkg/koordlet/util/runtime/handler/docker_runtime_test.go index 8059c8033..a07eb64ef 100644 --- a/pkg/koordlet/util/runtime/handler/docker_runtime_test.go +++ b/pkg/koordlet/util/runtime/handler/docker_runtime_test.go @@ -18,6 +18,7 @@ package handler import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -90,13 +91,13 @@ func Test_Docker_StopContainer(t *testing.T) { dockerClient, err := createDockerClient(newMockClient(stopContainer), endPoint) assert.NoError(t, err) dockerRuntimeHandler := DockerRuntimeHandler{endpoint: endPoint, dockerClient: dockerClient} - err = dockerRuntimeHandler.StopContainer("test_stop_container", 5) + err = dockerRuntimeHandler.StopContainer(context.TODO(), "test_stop_container", 5) assert.NoError(t, err) - err = dockerRuntimeHandler.StopContainer("", 5) + err = dockerRuntimeHandler.StopContainer(context.TODO(), "", 5) assert.Error(t, err) dockerRuntimeHandlerNotInit := DockerRuntimeHandler{endpoint: endPoint, dockerClient: nil} - err = dockerRuntimeHandlerNotInit.StopContainer("test_stop_container", 5) + err = dockerRuntimeHandlerNotInit.StopContainer(context.TODO(), "test_stop_container", 5) assert.Error(t, err) } diff --git a/pkg/koordlet/util/runtime/handler/fake_runtime.go b/pkg/koordlet/util/runtime/handler/fake_runtime.go index 0b388c786..4fd86ca66 100644 --- a/pkg/koordlet/util/runtime/handler/fake_runtime.go +++ b/pkg/koordlet/util/runtime/handler/fake_runtime.go @@ -17,6 +17,8 @@ limitations under the License. 
package handler import ( + "context" + "k8s.io/cri-api/pkg/apis/testing" "github.com/koordinator-sh/koordinator/pkg/koordlet/statesinformer" @@ -31,8 +33,8 @@ func NewFakeRuntimeHandler() ContainerRuntimeHandler { return &FakeRuntimeHandler{fakeRuntimeService: testing.NewFakeRuntimeService(), PodMetas: make(map[string]*statesinformer.PodMeta)} } -func (f *FakeRuntimeHandler) StopContainer(containerID string, timeout int64) error { - return f.fakeRuntimeService.StopContainer(containerID, timeout) +func (f *FakeRuntimeHandler) StopContainer(ctx context.Context, containerID string, timeout int64) error { + return f.fakeRuntimeService.StopContainer(ctx, containerID, timeout) } func (f *FakeRuntimeHandler) SetFakeContainers(containers []*testing.FakeContainer) { diff --git a/pkg/koordlet/util/runtime/handler/interface.go b/pkg/koordlet/util/runtime/handler/interface.go index cab4199ba..0ed64d442 100644 --- a/pkg/koordlet/util/runtime/handler/interface.go +++ b/pkg/koordlet/util/runtime/handler/interface.go @@ -16,7 +16,10 @@ limitations under the License. package handler -import "time" +import ( + "context" + "time" +) const ( // unixProtocol is the network protocol of unix socket. @@ -25,7 +28,7 @@ const ( ) type ContainerRuntimeHandler interface { - StopContainer(containerID string, timeout int64) error + StopContainer(ctx context.Context, containerID string, timeout int64) error UpdateContainerResources(containerID string, opts UpdateOptions) error } diff --git a/pkg/koordlet/util/runtime/handler/mockclient/mock.go b/pkg/koordlet/util/runtime/handler/mockclient/mock.go index 08c26aa89..176530551 100644 --- a/pkg/koordlet/util/runtime/handler/mockclient/mock.go +++ b/pkg/koordlet/util/runtime/handler/mockclient/mock.go @@ -27,6 +27,7 @@ import ( gomock "github.com/golang/mock/gomock" grpc "google.golang.org/grpc" + metadata "google.golang.org/grpc/metadata" v1 "k8s.io/cri-api/pkg/apis/runtime/v1" ) @@ -73,6 +74,26 @@ func (mr *MockRuntimeServiceClientMockRecorder) Attach(ctx, in interface{}, opts return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attach", reflect.TypeOf((*MockRuntimeServiceClient)(nil).Attach), varargs...) } +// CheckpointContainer mocks base method. +func (m *MockRuntimeServiceClient) CheckpointContainer(ctx context.Context, in *v1.CheckpointContainerRequest, opts ...grpc.CallOption) (*v1.CheckpointContainerResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CheckpointContainer", varargs...) + ret0, _ := ret[0].(*v1.CheckpointContainerResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CheckpointContainer indicates an expected call of CheckpointContainer. +func (mr *MockRuntimeServiceClientMockRecorder) CheckpointContainer(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointContainer", reflect.TypeOf((*MockRuntimeServiceClient)(nil).CheckpointContainer), varargs...) +} + // ContainerStats mocks base method. 
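// [Editor's note: illustrative sketch, not part of the patch.] With the interface.go
// change above, ContainerRuntimeHandler.StopContainer now takes a context.Context, so
// callers can bound the whole CRI round trip themselves. A minimal caller sketch
// against the updated interface:
package example

import (
	"context"
	"time"

	"github.com/koordinator-sh/koordinator/pkg/koordlet/util/runtime/handler"
)

// stopWithDeadline stops a container under a caller-controlled deadline; the second
// timeout argument remains the grace period forwarded to the runtime.
func stopWithDeadline(h handler.ContainerRuntimeHandler, containerID string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return h.StopContainer(ctx, containerID, 10)
}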
func (m *MockRuntimeServiceClient) ContainerStats(ctx context.Context, in *v1.ContainerStatsRequest, opts ...grpc.CallOption) (*v1.ContainerStatsResponse, error) { m.ctrl.T.Helper() @@ -173,6 +194,26 @@ func (mr *MockRuntimeServiceClientMockRecorder) ExecSync(ctx, in interface{}, op return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecSync", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ExecSync), varargs...) } +// GetContainerEvents mocks base method. +func (m *MockRuntimeServiceClient) GetContainerEvents(ctx context.Context, in *v1.GetEventsRequest, opts ...grpc.CallOption) (v1.RuntimeService_GetContainerEventsClient, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetContainerEvents", varargs...) + ret0, _ := ret[0].(v1.RuntimeService_GetContainerEventsClient) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetContainerEvents indicates an expected call of GetContainerEvents. +func (mr *MockRuntimeServiceClientMockRecorder) GetContainerEvents(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerEvents", reflect.TypeOf((*MockRuntimeServiceClient)(nil).GetContainerEvents), varargs...) +} + // ListContainerStats mocks base method. func (m *MockRuntimeServiceClient) ListContainerStats(ctx context.Context, in *v1.ListContainerStatsRequest, opts ...grpc.CallOption) (*v1.ListContainerStatsResponse, error) { m.ctrl.T.Helper() @@ -213,6 +254,26 @@ func (mr *MockRuntimeServiceClientMockRecorder) ListContainers(ctx, in interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListContainers", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ListContainers), varargs...) } +// ListMetricDescriptors mocks base method. +func (m *MockRuntimeServiceClient) ListMetricDescriptors(ctx context.Context, in *v1.ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*v1.ListMetricDescriptorsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListMetricDescriptors", varargs...) + ret0, _ := ret[0].(*v1.ListMetricDescriptorsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMetricDescriptors indicates an expected call of ListMetricDescriptors. +func (mr *MockRuntimeServiceClientMockRecorder) ListMetricDescriptors(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMetricDescriptors", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ListMetricDescriptors), varargs...) +} + // ListPodSandbox mocks base method. func (m *MockRuntimeServiceClient) ListPodSandbox(ctx context.Context, in *v1.ListPodSandboxRequest, opts ...grpc.CallOption) (*v1.ListPodSandboxResponse, error) { m.ctrl.T.Helper() @@ -233,6 +294,26 @@ func (mr *MockRuntimeServiceClientMockRecorder) ListPodSandbox(ctx, in interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodSandbox", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ListPodSandbox), varargs...) } +// ListPodSandboxMetrics mocks base method. 
+func (m *MockRuntimeServiceClient) ListPodSandboxMetrics(ctx context.Context, in *v1.ListPodSandboxMetricsRequest, opts ...grpc.CallOption) (*v1.ListPodSandboxMetricsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListPodSandboxMetrics", varargs...) + ret0, _ := ret[0].(*v1.ListPodSandboxMetricsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPodSandboxMetrics indicates an expected call of ListPodSandboxMetrics. +func (mr *MockRuntimeServiceClientMockRecorder) ListPodSandboxMetrics(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodSandboxMetrics", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ListPodSandboxMetrics), varargs...) +} + // ListPodSandboxStats mocks base method. func (m *MockRuntimeServiceClient) ListPodSandboxStats(ctx context.Context, in *v1.ListPodSandboxStatsRequest, opts ...grpc.CallOption) (*v1.ListPodSandboxStatsResponse, error) { m.ctrl.T.Helper() @@ -393,6 +474,26 @@ func (mr *MockRuntimeServiceClientMockRecorder) RunPodSandbox(ctx, in interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunPodSandbox", reflect.TypeOf((*MockRuntimeServiceClient)(nil).RunPodSandbox), varargs...) } +// RuntimeConfig mocks base method. +func (m *MockRuntimeServiceClient) RuntimeConfig(ctx context.Context, in *v1.RuntimeConfigRequest, opts ...grpc.CallOption) (*v1.RuntimeConfigResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RuntimeConfig", varargs...) + ret0, _ := ret[0].(*v1.RuntimeConfigResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RuntimeConfig indicates an expected call of RuntimeConfig. +func (mr *MockRuntimeServiceClientMockRecorder) RuntimeConfig(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RuntimeConfig", reflect.TypeOf((*MockRuntimeServiceClient)(nil).RuntimeConfig), varargs...) +} + // StartContainer mocks base method. func (m *MockRuntimeServiceClient) StartContainer(ctx context.Context, in *v1.StartContainerRequest, opts ...grpc.CallOption) (*v1.StartContainerResponse, error) { m.ctrl.T.Helper() @@ -533,6 +634,129 @@ func (mr *MockRuntimeServiceClientMockRecorder) Version(ctx, in interface{}, opt return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRuntimeServiceClient)(nil).Version), varargs...) } +// MockRuntimeService_GetContainerEventsClient is a mock of RuntimeService_GetContainerEventsClient interface. +type MockRuntimeService_GetContainerEventsClient struct { + ctrl *gomock.Controller + recorder *MockRuntimeService_GetContainerEventsClientMockRecorder +} + +// MockRuntimeService_GetContainerEventsClientMockRecorder is the mock recorder for MockRuntimeService_GetContainerEventsClient. +type MockRuntimeService_GetContainerEventsClientMockRecorder struct { + mock *MockRuntimeService_GetContainerEventsClient +} + +// NewMockRuntimeService_GetContainerEventsClient creates a new mock instance. 
+func NewMockRuntimeService_GetContainerEventsClient(ctrl *gomock.Controller) *MockRuntimeService_GetContainerEventsClient { + mock := &MockRuntimeService_GetContainerEventsClient{ctrl: ctrl} + mock.recorder = &MockRuntimeService_GetContainerEventsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRuntimeService_GetContainerEventsClient) EXPECT() *MockRuntimeService_GetContainerEventsClientMockRecorder { + return m.recorder +} + +// CloseSend mocks base method. +func (m *MockRuntimeService_GetContainerEventsClient) CloseSend() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CloseSend") + ret0, _ := ret[0].(error) + return ret0 +} + +// CloseSend indicates an expected call of CloseSend. +func (mr *MockRuntimeService_GetContainerEventsClientMockRecorder) CloseSend() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockRuntimeService_GetContainerEventsClient)(nil).CloseSend)) +} + +// Context mocks base method. +func (m *MockRuntimeService_GetContainerEventsClient) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockRuntimeService_GetContainerEventsClientMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockRuntimeService_GetContainerEventsClient)(nil).Context)) +} + +// Header mocks base method. +func (m *MockRuntimeService_GetContainerEventsClient) Header() (metadata.MD, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Header") + ret0, _ := ret[0].(metadata.MD) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Header indicates an expected call of Header. +func (mr *MockRuntimeService_GetContainerEventsClientMockRecorder) Header() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockRuntimeService_GetContainerEventsClient)(nil).Header)) +} + +// Recv mocks base method. +func (m *MockRuntimeService_GetContainerEventsClient) Recv() (*v1.ContainerEventResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Recv") + ret0, _ := ret[0].(*v1.ContainerEventResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Recv indicates an expected call of Recv. +func (mr *MockRuntimeService_GetContainerEventsClientMockRecorder) Recv() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockRuntimeService_GetContainerEventsClient)(nil).Recv)) +} + +// RecvMsg mocks base method. +func (m_2 *MockRuntimeService_GetContainerEventsClient) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockRuntimeService_GetContainerEventsClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockRuntimeService_GetContainerEventsClient)(nil).RecvMsg), m) +} + +// SendMsg mocks base method. 
+func (m_2 *MockRuntimeService_GetContainerEventsClient) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockRuntimeService_GetContainerEventsClientMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockRuntimeService_GetContainerEventsClient)(nil).SendMsg), m) +} + +// Trailer mocks base method. +func (m *MockRuntimeService_GetContainerEventsClient) Trailer() metadata.MD { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Trailer") + ret0, _ := ret[0].(metadata.MD) + return ret0 +} + +// Trailer indicates an expected call of Trailer. +func (mr *MockRuntimeService_GetContainerEventsClientMockRecorder) Trailer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockRuntimeService_GetContainerEventsClient)(nil).Trailer)) +} + // MockRuntimeServiceServer is a mock of RuntimeServiceServer interface. type MockRuntimeServiceServer struct { ctrl *gomock.Controller @@ -571,6 +795,21 @@ func (mr *MockRuntimeServiceServerMockRecorder) Attach(arg0, arg1 interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attach", reflect.TypeOf((*MockRuntimeServiceServer)(nil).Attach), arg0, arg1) } +// CheckpointContainer mocks base method. +func (m *MockRuntimeServiceServer) CheckpointContainer(arg0 context.Context, arg1 *v1.CheckpointContainerRequest) (*v1.CheckpointContainerResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CheckpointContainer", arg0, arg1) + ret0, _ := ret[0].(*v1.CheckpointContainerResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CheckpointContainer indicates an expected call of CheckpointContainer. +func (mr *MockRuntimeServiceServerMockRecorder) CheckpointContainer(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointContainer", reflect.TypeOf((*MockRuntimeServiceServer)(nil).CheckpointContainer), arg0, arg1) +} + // ContainerStats mocks base method. func (m *MockRuntimeServiceServer) ContainerStats(arg0 context.Context, arg1 *v1.ContainerStatsRequest) (*v1.ContainerStatsResponse, error) { m.ctrl.T.Helper() @@ -646,6 +885,20 @@ func (mr *MockRuntimeServiceServerMockRecorder) ExecSync(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecSync", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ExecSync), arg0, arg1) } +// GetContainerEvents mocks base method. +func (m *MockRuntimeServiceServer) GetContainerEvents(arg0 *v1.GetEventsRequest, arg1 v1.RuntimeService_GetContainerEventsServer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetContainerEvents", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetContainerEvents indicates an expected call of GetContainerEvents. +func (mr *MockRuntimeServiceServerMockRecorder) GetContainerEvents(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerEvents", reflect.TypeOf((*MockRuntimeServiceServer)(nil).GetContainerEvents), arg0, arg1) +} + // ListContainerStats mocks base method. 
func (m *MockRuntimeServiceServer) ListContainerStats(arg0 context.Context, arg1 *v1.ListContainerStatsRequest) (*v1.ListContainerStatsResponse, error) { m.ctrl.T.Helper() @@ -676,6 +929,21 @@ func (mr *MockRuntimeServiceServerMockRecorder) ListContainers(arg0, arg1 interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListContainers", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ListContainers), arg0, arg1) } +// ListMetricDescriptors mocks base method. +func (m *MockRuntimeServiceServer) ListMetricDescriptors(arg0 context.Context, arg1 *v1.ListMetricDescriptorsRequest) (*v1.ListMetricDescriptorsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMetricDescriptors", arg0, arg1) + ret0, _ := ret[0].(*v1.ListMetricDescriptorsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMetricDescriptors indicates an expected call of ListMetricDescriptors. +func (mr *MockRuntimeServiceServerMockRecorder) ListMetricDescriptors(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMetricDescriptors", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ListMetricDescriptors), arg0, arg1) +} + // ListPodSandbox mocks base method. func (m *MockRuntimeServiceServer) ListPodSandbox(arg0 context.Context, arg1 *v1.ListPodSandboxRequest) (*v1.ListPodSandboxResponse, error) { m.ctrl.T.Helper() @@ -691,6 +959,21 @@ func (mr *MockRuntimeServiceServerMockRecorder) ListPodSandbox(arg0, arg1 interf return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodSandbox", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ListPodSandbox), arg0, arg1) } +// ListPodSandboxMetrics mocks base method. +func (m *MockRuntimeServiceServer) ListPodSandboxMetrics(arg0 context.Context, arg1 *v1.ListPodSandboxMetricsRequest) (*v1.ListPodSandboxMetricsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListPodSandboxMetrics", arg0, arg1) + ret0, _ := ret[0].(*v1.ListPodSandboxMetricsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPodSandboxMetrics indicates an expected call of ListPodSandboxMetrics. +func (mr *MockRuntimeServiceServerMockRecorder) ListPodSandboxMetrics(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodSandboxMetrics", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ListPodSandboxMetrics), arg0, arg1) +} + // ListPodSandboxStats mocks base method. func (m *MockRuntimeServiceServer) ListPodSandboxStats(arg0 context.Context, arg1 *v1.ListPodSandboxStatsRequest) (*v1.ListPodSandboxStatsResponse, error) { m.ctrl.T.Helper() @@ -811,6 +1094,21 @@ func (mr *MockRuntimeServiceServerMockRecorder) RunPodSandbox(arg0, arg1 interfa return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunPodSandbox", reflect.TypeOf((*MockRuntimeServiceServer)(nil).RunPodSandbox), arg0, arg1) } +// RuntimeConfig mocks base method. +func (m *MockRuntimeServiceServer) RuntimeConfig(arg0 context.Context, arg1 *v1.RuntimeConfigRequest) (*v1.RuntimeConfigResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RuntimeConfig", arg0, arg1) + ret0, _ := ret[0].(*v1.RuntimeConfigResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RuntimeConfig indicates an expected call of RuntimeConfig. 
+func (mr *MockRuntimeServiceServerMockRecorder) RuntimeConfig(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RuntimeConfig", reflect.TypeOf((*MockRuntimeServiceServer)(nil).RuntimeConfig), arg0, arg1) +} + // StartContainer mocks base method. func (m *MockRuntimeServiceServer) StartContainer(arg0 context.Context, arg1 *v1.StartContainerRequest) (*v1.StartContainerResponse, error) { m.ctrl.T.Helper() @@ -916,6 +1214,125 @@ func (mr *MockRuntimeServiceServerMockRecorder) Version(arg0, arg1 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRuntimeServiceServer)(nil).Version), arg0, arg1) } +// MockRuntimeService_GetContainerEventsServer is a mock of RuntimeService_GetContainerEventsServer interface. +type MockRuntimeService_GetContainerEventsServer struct { + ctrl *gomock.Controller + recorder *MockRuntimeService_GetContainerEventsServerMockRecorder +} + +// MockRuntimeService_GetContainerEventsServerMockRecorder is the mock recorder for MockRuntimeService_GetContainerEventsServer. +type MockRuntimeService_GetContainerEventsServerMockRecorder struct { + mock *MockRuntimeService_GetContainerEventsServer +} + +// NewMockRuntimeService_GetContainerEventsServer creates a new mock instance. +func NewMockRuntimeService_GetContainerEventsServer(ctrl *gomock.Controller) *MockRuntimeService_GetContainerEventsServer { + mock := &MockRuntimeService_GetContainerEventsServer{ctrl: ctrl} + mock.recorder = &MockRuntimeService_GetContainerEventsServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRuntimeService_GetContainerEventsServer) EXPECT() *MockRuntimeService_GetContainerEventsServerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockRuntimeService_GetContainerEventsServer) Context() context.Context { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(context.Context) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockRuntimeService_GetContainerEventsServerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockRuntimeService_GetContainerEventsServer)(nil).Context)) +} + +// RecvMsg mocks base method. +func (m_2 *MockRuntimeService_GetContainerEventsServer) RecvMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "RecvMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// RecvMsg indicates an expected call of RecvMsg. +func (mr *MockRuntimeService_GetContainerEventsServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockRuntimeService_GetContainerEventsServer)(nil).RecvMsg), m) +} + +// Send mocks base method. +func (m *MockRuntimeService_GetContainerEventsServer) Send(arg0 *v1.ContainerEventResponse) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockRuntimeService_GetContainerEventsServerMockRecorder) Send(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockRuntimeService_GetContainerEventsServer)(nil).Send), arg0) +} + +// SendHeader mocks base method. 
+func (m *MockRuntimeService_GetContainerEventsServer) SendHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendHeader indicates an expected call of SendHeader. +func (mr *MockRuntimeService_GetContainerEventsServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockRuntimeService_GetContainerEventsServer)(nil).SendHeader), arg0) +} + +// SendMsg mocks base method. +func (m_2 *MockRuntimeService_GetContainerEventsServer) SendMsg(m interface{}) error { + m_2.ctrl.T.Helper() + ret := m_2.ctrl.Call(m_2, "SendMsg", m) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendMsg indicates an expected call of SendMsg. +func (mr *MockRuntimeService_GetContainerEventsServerMockRecorder) SendMsg(m interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockRuntimeService_GetContainerEventsServer)(nil).SendMsg), m) +} + +// SetHeader mocks base method. +func (m *MockRuntimeService_GetContainerEventsServer) SetHeader(arg0 metadata.MD) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetHeader", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetHeader indicates an expected call of SetHeader. +func (mr *MockRuntimeService_GetContainerEventsServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockRuntimeService_GetContainerEventsServer)(nil).SetHeader), arg0) +} + +// SetTrailer mocks base method. +func (m *MockRuntimeService_GetContainerEventsServer) SetTrailer(arg0 metadata.MD) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTrailer", arg0) +} + +// SetTrailer indicates an expected call of SetTrailer. +func (mr *MockRuntimeService_GetContainerEventsServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockRuntimeService_GetContainerEventsServer)(nil).SetTrailer), arg0) +} + // MockImageServiceClient is a mock of ImageServiceClient interface. type MockImageServiceClient struct { ctrl *gomock.Controller diff --git a/pkg/koordlet/util/runtime/handler/mockv1alpha2client/mock.go b/pkg/koordlet/util/runtime/handler/mockv1alpha2client/mock.go deleted file mode 100644 index 57e2d401a..000000000 --- a/pkg/koordlet/util/runtime/handler/mockv1alpha2client/mock.go +++ /dev/null @@ -1,1138 +0,0 @@ -/* -Copyright 2022 The Koordinator Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// - -// Code generated by MockGen. DO NOT EDIT. -// Source: vendor/k8s.io/cri-api/pkg/apis/runtime/v1alpha2/api.pb.go - -// Package mockv1alpha2_client is a generated GoMock package. 
-package mockv1alpha2_client - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - grpc "google.golang.org/grpc" - v1alpha2 "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" -) - -// MockRuntimeServiceClient is a mock of RuntimeServiceClient interface. -type MockRuntimeServiceClient struct { - ctrl *gomock.Controller - recorder *MockRuntimeServiceClientMockRecorder -} - -// MockRuntimeServiceClientMockRecorder is the mock recorder for MockRuntimeServiceClient. -type MockRuntimeServiceClientMockRecorder struct { - mock *MockRuntimeServiceClient -} - -// NewMockRuntimeServiceClient creates a new mock instance. -func NewMockRuntimeServiceClient(ctrl *gomock.Controller) *MockRuntimeServiceClient { - mock := &MockRuntimeServiceClient{ctrl: ctrl} - mock.recorder = &MockRuntimeServiceClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockRuntimeServiceClient) EXPECT() *MockRuntimeServiceClientMockRecorder { - return m.recorder -} - -// Attach mocks base method. -func (m *MockRuntimeServiceClient) Attach(ctx context.Context, in *v1alpha2.AttachRequest, opts ...grpc.CallOption) (*v1alpha2.AttachResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Attach", varargs...) - ret0, _ := ret[0].(*v1alpha2.AttachResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Attach indicates an expected call of Attach. -func (mr *MockRuntimeServiceClientMockRecorder) Attach(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attach", reflect.TypeOf((*MockRuntimeServiceClient)(nil).Attach), varargs...) -} - -// ContainerStats mocks base method. -func (m *MockRuntimeServiceClient) ContainerStats(ctx context.Context, in *v1alpha2.ContainerStatsRequest, opts ...grpc.CallOption) (*v1alpha2.ContainerStatsResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ContainerStats", varargs...) - ret0, _ := ret[0].(*v1alpha2.ContainerStatsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ContainerStats indicates an expected call of ContainerStats. -func (mr *MockRuntimeServiceClientMockRecorder) ContainerStats(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStats", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ContainerStats), varargs...) -} - -// ContainerStatus mocks base method. -func (m *MockRuntimeServiceClient) ContainerStatus(ctx context.Context, in *v1alpha2.ContainerStatusRequest, opts ...grpc.CallOption) (*v1alpha2.ContainerStatusResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ContainerStatus", varargs...) - ret0, _ := ret[0].(*v1alpha2.ContainerStatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ContainerStatus indicates an expected call of ContainerStatus. 
-func (mr *MockRuntimeServiceClientMockRecorder) ContainerStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStatus", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ContainerStatus), varargs...) -} - -// CreateContainer mocks base method. -func (m *MockRuntimeServiceClient) CreateContainer(ctx context.Context, in *v1alpha2.CreateContainerRequest, opts ...grpc.CallOption) (*v1alpha2.CreateContainerResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "CreateContainer", varargs...) - ret0, _ := ret[0].(*v1alpha2.CreateContainerResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateContainer indicates an expected call of CreateContainer. -func (mr *MockRuntimeServiceClientMockRecorder) CreateContainer(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateContainer", reflect.TypeOf((*MockRuntimeServiceClient)(nil).CreateContainer), varargs...) -} - -// Exec mocks base method. -func (m *MockRuntimeServiceClient) Exec(ctx context.Context, in *v1alpha2.ExecRequest, opts ...grpc.CallOption) (*v1alpha2.ExecResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Exec", varargs...) - ret0, _ := ret[0].(*v1alpha2.ExecResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Exec indicates an expected call of Exec. -func (mr *MockRuntimeServiceClientMockRecorder) Exec(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockRuntimeServiceClient)(nil).Exec), varargs...) -} - -// ExecSync mocks base method. -func (m *MockRuntimeServiceClient) ExecSync(ctx context.Context, in *v1alpha2.ExecSyncRequest, opts ...grpc.CallOption) (*v1alpha2.ExecSyncResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ExecSync", varargs...) - ret0, _ := ret[0].(*v1alpha2.ExecSyncResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExecSync indicates an expected call of ExecSync. -func (mr *MockRuntimeServiceClientMockRecorder) ExecSync(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecSync", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ExecSync), varargs...) -} - -// ListContainerStats mocks base method. -func (m *MockRuntimeServiceClient) ListContainerStats(ctx context.Context, in *v1alpha2.ListContainerStatsRequest, opts ...grpc.CallOption) (*v1alpha2.ListContainerStatsResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListContainerStats", varargs...) - ret0, _ := ret[0].(*v1alpha2.ListContainerStatsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListContainerStats indicates an expected call of ListContainerStats. 
-func (mr *MockRuntimeServiceClientMockRecorder) ListContainerStats(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListContainerStats", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ListContainerStats), varargs...) -} - -// ListContainers mocks base method. -func (m *MockRuntimeServiceClient) ListContainers(ctx context.Context, in *v1alpha2.ListContainersRequest, opts ...grpc.CallOption) (*v1alpha2.ListContainersResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListContainers", varargs...) - ret0, _ := ret[0].(*v1alpha2.ListContainersResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListContainers indicates an expected call of ListContainers. -func (mr *MockRuntimeServiceClientMockRecorder) ListContainers(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListContainers", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ListContainers), varargs...) -} - -// ListPodSandbox mocks base method. -func (m *MockRuntimeServiceClient) ListPodSandbox(ctx context.Context, in *v1alpha2.ListPodSandboxRequest, opts ...grpc.CallOption) (*v1alpha2.ListPodSandboxResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListPodSandbox", varargs...) - ret0, _ := ret[0].(*v1alpha2.ListPodSandboxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListPodSandbox indicates an expected call of ListPodSandbox. -func (mr *MockRuntimeServiceClientMockRecorder) ListPodSandbox(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodSandbox", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ListPodSandbox), varargs...) -} - -// ListPodSandboxStats mocks base method. -func (m *MockRuntimeServiceClient) ListPodSandboxStats(ctx context.Context, in *v1alpha2.ListPodSandboxStatsRequest, opts ...grpc.CallOption) (*v1alpha2.ListPodSandboxStatsResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListPodSandboxStats", varargs...) - ret0, _ := ret[0].(*v1alpha2.ListPodSandboxStatsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListPodSandboxStats indicates an expected call of ListPodSandboxStats. -func (mr *MockRuntimeServiceClientMockRecorder) ListPodSandboxStats(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodSandboxStats", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ListPodSandboxStats), varargs...) -} - -// PodSandboxStats mocks base method. 
-func (m *MockRuntimeServiceClient) PodSandboxStats(ctx context.Context, in *v1alpha2.PodSandboxStatsRequest, opts ...grpc.CallOption) (*v1alpha2.PodSandboxStatsResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PodSandboxStats", varargs...) - ret0, _ := ret[0].(*v1alpha2.PodSandboxStatsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PodSandboxStats indicates an expected call of PodSandboxStats. -func (mr *MockRuntimeServiceClientMockRecorder) PodSandboxStats(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PodSandboxStats", reflect.TypeOf((*MockRuntimeServiceClient)(nil).PodSandboxStats), varargs...) -} - -// PodSandboxStatus mocks base method. -func (m *MockRuntimeServiceClient) PodSandboxStatus(ctx context.Context, in *v1alpha2.PodSandboxStatusRequest, opts ...grpc.CallOption) (*v1alpha2.PodSandboxStatusResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PodSandboxStatus", varargs...) - ret0, _ := ret[0].(*v1alpha2.PodSandboxStatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PodSandboxStatus indicates an expected call of PodSandboxStatus. -func (mr *MockRuntimeServiceClientMockRecorder) PodSandboxStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PodSandboxStatus", reflect.TypeOf((*MockRuntimeServiceClient)(nil).PodSandboxStatus), varargs...) -} - -// PortForward mocks base method. -func (m *MockRuntimeServiceClient) PortForward(ctx context.Context, in *v1alpha2.PortForwardRequest, opts ...grpc.CallOption) (*v1alpha2.PortForwardResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PortForward", varargs...) - ret0, _ := ret[0].(*v1alpha2.PortForwardResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PortForward indicates an expected call of PortForward. -func (mr *MockRuntimeServiceClientMockRecorder) PortForward(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortForward", reflect.TypeOf((*MockRuntimeServiceClient)(nil).PortForward), varargs...) -} - -// RemoveContainer mocks base method. -func (m *MockRuntimeServiceClient) RemoveContainer(ctx context.Context, in *v1alpha2.RemoveContainerRequest, opts ...grpc.CallOption) (*v1alpha2.RemoveContainerResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "RemoveContainer", varargs...) - ret0, _ := ret[0].(*v1alpha2.RemoveContainerResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RemoveContainer indicates an expected call of RemoveContainer. -func (mr *MockRuntimeServiceClientMockRecorder) RemoveContainer(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveContainer", reflect.TypeOf((*MockRuntimeServiceClient)(nil).RemoveContainer), varargs...) -} - -// RemovePodSandbox mocks base method. -func (m *MockRuntimeServiceClient) RemovePodSandbox(ctx context.Context, in *v1alpha2.RemovePodSandboxRequest, opts ...grpc.CallOption) (*v1alpha2.RemovePodSandboxResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "RemovePodSandbox", varargs...) - ret0, _ := ret[0].(*v1alpha2.RemovePodSandboxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RemovePodSandbox indicates an expected call of RemovePodSandbox. -func (mr *MockRuntimeServiceClientMockRecorder) RemovePodSandbox(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePodSandbox", reflect.TypeOf((*MockRuntimeServiceClient)(nil).RemovePodSandbox), varargs...) -} - -// ReopenContainerLog mocks base method. -func (m *MockRuntimeServiceClient) ReopenContainerLog(ctx context.Context, in *v1alpha2.ReopenContainerLogRequest, opts ...grpc.CallOption) (*v1alpha2.ReopenContainerLogResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ReopenContainerLog", varargs...) - ret0, _ := ret[0].(*v1alpha2.ReopenContainerLogResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReopenContainerLog indicates an expected call of ReopenContainerLog. -func (mr *MockRuntimeServiceClientMockRecorder) ReopenContainerLog(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReopenContainerLog", reflect.TypeOf((*MockRuntimeServiceClient)(nil).ReopenContainerLog), varargs...) -} - -// RunPodSandbox mocks base method. -func (m *MockRuntimeServiceClient) RunPodSandbox(ctx context.Context, in *v1alpha2.RunPodSandboxRequest, opts ...grpc.CallOption) (*v1alpha2.RunPodSandboxResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "RunPodSandbox", varargs...) - ret0, _ := ret[0].(*v1alpha2.RunPodSandboxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RunPodSandbox indicates an expected call of RunPodSandbox. -func (mr *MockRuntimeServiceClientMockRecorder) RunPodSandbox(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunPodSandbox", reflect.TypeOf((*MockRuntimeServiceClient)(nil).RunPodSandbox), varargs...) -} - -// StartContainer mocks base method. -func (m *MockRuntimeServiceClient) StartContainer(ctx context.Context, in *v1alpha2.StartContainerRequest, opts ...grpc.CallOption) (*v1alpha2.StartContainerResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "StartContainer", varargs...) - ret0, _ := ret[0].(*v1alpha2.StartContainerResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StartContainer indicates an expected call of StartContainer. 
-func (mr *MockRuntimeServiceClientMockRecorder) StartContainer(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartContainer", reflect.TypeOf((*MockRuntimeServiceClient)(nil).StartContainer), varargs...) -} - -// Status mocks base method. -func (m *MockRuntimeServiceClient) Status(ctx context.Context, in *v1alpha2.StatusRequest, opts ...grpc.CallOption) (*v1alpha2.StatusResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Status", varargs...) - ret0, _ := ret[0].(*v1alpha2.StatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Status indicates an expected call of Status. -func (mr *MockRuntimeServiceClientMockRecorder) Status(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockRuntimeServiceClient)(nil).Status), varargs...) -} - -// StopContainer mocks base method. -func (m *MockRuntimeServiceClient) StopContainer(ctx context.Context, in *v1alpha2.StopContainerRequest, opts ...grpc.CallOption) (*v1alpha2.StopContainerResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "StopContainer", varargs...) - ret0, _ := ret[0].(*v1alpha2.StopContainerResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StopContainer indicates an expected call of StopContainer. -func (mr *MockRuntimeServiceClientMockRecorder) StopContainer(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopContainer", reflect.TypeOf((*MockRuntimeServiceClient)(nil).StopContainer), varargs...) -} - -// StopPodSandbox mocks base method. -func (m *MockRuntimeServiceClient) StopPodSandbox(ctx context.Context, in *v1alpha2.StopPodSandboxRequest, opts ...grpc.CallOption) (*v1alpha2.StopPodSandboxResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "StopPodSandbox", varargs...) - ret0, _ := ret[0].(*v1alpha2.StopPodSandboxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StopPodSandbox indicates an expected call of StopPodSandbox. -func (mr *MockRuntimeServiceClientMockRecorder) StopPodSandbox(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopPodSandbox", reflect.TypeOf((*MockRuntimeServiceClient)(nil).StopPodSandbox), varargs...) -} - -// UpdateContainerResources mocks base method. -func (m *MockRuntimeServiceClient) UpdateContainerResources(ctx context.Context, in *v1alpha2.UpdateContainerResourcesRequest, opts ...grpc.CallOption) (*v1alpha2.UpdateContainerResourcesResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "UpdateContainerResources", varargs...) 
- ret0, _ := ret[0].(*v1alpha2.UpdateContainerResourcesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateContainerResources indicates an expected call of UpdateContainerResources. -func (mr *MockRuntimeServiceClientMockRecorder) UpdateContainerResources(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateContainerResources", reflect.TypeOf((*MockRuntimeServiceClient)(nil).UpdateContainerResources), varargs...) -} - -// UpdateRuntimeConfig mocks base method. -func (m *MockRuntimeServiceClient) UpdateRuntimeConfig(ctx context.Context, in *v1alpha2.UpdateRuntimeConfigRequest, opts ...grpc.CallOption) (*v1alpha2.UpdateRuntimeConfigResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "UpdateRuntimeConfig", varargs...) - ret0, _ := ret[0].(*v1alpha2.UpdateRuntimeConfigResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateRuntimeConfig indicates an expected call of UpdateRuntimeConfig. -func (mr *MockRuntimeServiceClientMockRecorder) UpdateRuntimeConfig(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRuntimeConfig", reflect.TypeOf((*MockRuntimeServiceClient)(nil).UpdateRuntimeConfig), varargs...) -} - -// Version mocks base method. -func (m *MockRuntimeServiceClient) Version(ctx context.Context, in *v1alpha2.VersionRequest, opts ...grpc.CallOption) (*v1alpha2.VersionResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Version", varargs...) - ret0, _ := ret[0].(*v1alpha2.VersionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Version indicates an expected call of Version. -func (mr *MockRuntimeServiceClientMockRecorder) Version(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRuntimeServiceClient)(nil).Version), varargs...) -} - -// MockRuntimeServiceServer is a mock of RuntimeServiceServer interface. -type MockRuntimeServiceServer struct { - ctrl *gomock.Controller - recorder *MockRuntimeServiceServerMockRecorder -} - -// MockRuntimeServiceServerMockRecorder is the mock recorder for MockRuntimeServiceServer. -type MockRuntimeServiceServerMockRecorder struct { - mock *MockRuntimeServiceServer -} - -// NewMockRuntimeServiceServer creates a new mock instance. -func NewMockRuntimeServiceServer(ctrl *gomock.Controller) *MockRuntimeServiceServer { - mock := &MockRuntimeServiceServer{ctrl: ctrl} - mock.recorder = &MockRuntimeServiceServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockRuntimeServiceServer) EXPECT() *MockRuntimeServiceServerMockRecorder { - return m.recorder -} - -// Attach mocks base method. 
-func (m *MockRuntimeServiceServer) Attach(arg0 context.Context, arg1 *v1alpha2.AttachRequest) (*v1alpha2.AttachResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Attach", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.AttachResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Attach indicates an expected call of Attach. -func (mr *MockRuntimeServiceServerMockRecorder) Attach(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attach", reflect.TypeOf((*MockRuntimeServiceServer)(nil).Attach), arg0, arg1) -} - -// ContainerStats mocks base method. -func (m *MockRuntimeServiceServer) ContainerStats(arg0 context.Context, arg1 *v1alpha2.ContainerStatsRequest) (*v1alpha2.ContainerStatsResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ContainerStats", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ContainerStatsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ContainerStats indicates an expected call of ContainerStats. -func (mr *MockRuntimeServiceServerMockRecorder) ContainerStats(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStats", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ContainerStats), arg0, arg1) -} - -// ContainerStatus mocks base method. -func (m *MockRuntimeServiceServer) ContainerStatus(arg0 context.Context, arg1 *v1alpha2.ContainerStatusRequest) (*v1alpha2.ContainerStatusResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ContainerStatus", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ContainerStatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ContainerStatus indicates an expected call of ContainerStatus. -func (mr *MockRuntimeServiceServerMockRecorder) ContainerStatus(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerStatus", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ContainerStatus), arg0, arg1) -} - -// CreateContainer mocks base method. -func (m *MockRuntimeServiceServer) CreateContainer(arg0 context.Context, arg1 *v1alpha2.CreateContainerRequest) (*v1alpha2.CreateContainerResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateContainer", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.CreateContainerResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateContainer indicates an expected call of CreateContainer. -func (mr *MockRuntimeServiceServerMockRecorder) CreateContainer(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateContainer", reflect.TypeOf((*MockRuntimeServiceServer)(nil).CreateContainer), arg0, arg1) -} - -// Exec mocks base method. -func (m *MockRuntimeServiceServer) Exec(arg0 context.Context, arg1 *v1alpha2.ExecRequest) (*v1alpha2.ExecResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Exec", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ExecResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Exec indicates an expected call of Exec. -func (mr *MockRuntimeServiceServerMockRecorder) Exec(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockRuntimeServiceServer)(nil).Exec), arg0, arg1) -} - -// ExecSync mocks base method. 
-func (m *MockRuntimeServiceServer) ExecSync(arg0 context.Context, arg1 *v1alpha2.ExecSyncRequest) (*v1alpha2.ExecSyncResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExecSync", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ExecSyncResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExecSync indicates an expected call of ExecSync. -func (mr *MockRuntimeServiceServerMockRecorder) ExecSync(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecSync", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ExecSync), arg0, arg1) -} - -// ListContainerStats mocks base method. -func (m *MockRuntimeServiceServer) ListContainerStats(arg0 context.Context, arg1 *v1alpha2.ListContainerStatsRequest) (*v1alpha2.ListContainerStatsResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListContainerStats", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ListContainerStatsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListContainerStats indicates an expected call of ListContainerStats. -func (mr *MockRuntimeServiceServerMockRecorder) ListContainerStats(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListContainerStats", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ListContainerStats), arg0, arg1) -} - -// ListContainers mocks base method. -func (m *MockRuntimeServiceServer) ListContainers(arg0 context.Context, arg1 *v1alpha2.ListContainersRequest) (*v1alpha2.ListContainersResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListContainers", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ListContainersResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListContainers indicates an expected call of ListContainers. -func (mr *MockRuntimeServiceServerMockRecorder) ListContainers(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListContainers", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ListContainers), arg0, arg1) -} - -// ListPodSandbox mocks base method. -func (m *MockRuntimeServiceServer) ListPodSandbox(arg0 context.Context, arg1 *v1alpha2.ListPodSandboxRequest) (*v1alpha2.ListPodSandboxResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPodSandbox", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ListPodSandboxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListPodSandbox indicates an expected call of ListPodSandbox. -func (mr *MockRuntimeServiceServerMockRecorder) ListPodSandbox(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodSandbox", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ListPodSandbox), arg0, arg1) -} - -// ListPodSandboxStats mocks base method. -func (m *MockRuntimeServiceServer) ListPodSandboxStats(arg0 context.Context, arg1 *v1alpha2.ListPodSandboxStatsRequest) (*v1alpha2.ListPodSandboxStatsResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPodSandboxStats", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ListPodSandboxStatsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListPodSandboxStats indicates an expected call of ListPodSandboxStats. 
-func (mr *MockRuntimeServiceServerMockRecorder) ListPodSandboxStats(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodSandboxStats", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ListPodSandboxStats), arg0, arg1) -} - -// PodSandboxStats mocks base method. -func (m *MockRuntimeServiceServer) PodSandboxStats(arg0 context.Context, arg1 *v1alpha2.PodSandboxStatsRequest) (*v1alpha2.PodSandboxStatsResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PodSandboxStats", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.PodSandboxStatsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PodSandboxStats indicates an expected call of PodSandboxStats. -func (mr *MockRuntimeServiceServerMockRecorder) PodSandboxStats(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PodSandboxStats", reflect.TypeOf((*MockRuntimeServiceServer)(nil).PodSandboxStats), arg0, arg1) -} - -// PodSandboxStatus mocks base method. -func (m *MockRuntimeServiceServer) PodSandboxStatus(arg0 context.Context, arg1 *v1alpha2.PodSandboxStatusRequest) (*v1alpha2.PodSandboxStatusResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PodSandboxStatus", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.PodSandboxStatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PodSandboxStatus indicates an expected call of PodSandboxStatus. -func (mr *MockRuntimeServiceServerMockRecorder) PodSandboxStatus(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PodSandboxStatus", reflect.TypeOf((*MockRuntimeServiceServer)(nil).PodSandboxStatus), arg0, arg1) -} - -// PortForward mocks base method. -func (m *MockRuntimeServiceServer) PortForward(arg0 context.Context, arg1 *v1alpha2.PortForwardRequest) (*v1alpha2.PortForwardResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PortForward", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.PortForwardResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PortForward indicates an expected call of PortForward. -func (mr *MockRuntimeServiceServerMockRecorder) PortForward(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortForward", reflect.TypeOf((*MockRuntimeServiceServer)(nil).PortForward), arg0, arg1) -} - -// RemoveContainer mocks base method. -func (m *MockRuntimeServiceServer) RemoveContainer(arg0 context.Context, arg1 *v1alpha2.RemoveContainerRequest) (*v1alpha2.RemoveContainerResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemoveContainer", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.RemoveContainerResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RemoveContainer indicates an expected call of RemoveContainer. -func (mr *MockRuntimeServiceServerMockRecorder) RemoveContainer(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveContainer", reflect.TypeOf((*MockRuntimeServiceServer)(nil).RemoveContainer), arg0, arg1) -} - -// RemovePodSandbox mocks base method. 
-func (m *MockRuntimeServiceServer) RemovePodSandbox(arg0 context.Context, arg1 *v1alpha2.RemovePodSandboxRequest) (*v1alpha2.RemovePodSandboxResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemovePodSandbox", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.RemovePodSandboxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RemovePodSandbox indicates an expected call of RemovePodSandbox. -func (mr *MockRuntimeServiceServerMockRecorder) RemovePodSandbox(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePodSandbox", reflect.TypeOf((*MockRuntimeServiceServer)(nil).RemovePodSandbox), arg0, arg1) -} - -// ReopenContainerLog mocks base method. -func (m *MockRuntimeServiceServer) ReopenContainerLog(arg0 context.Context, arg1 *v1alpha2.ReopenContainerLogRequest) (*v1alpha2.ReopenContainerLogResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReopenContainerLog", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ReopenContainerLogResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReopenContainerLog indicates an expected call of ReopenContainerLog. -func (mr *MockRuntimeServiceServerMockRecorder) ReopenContainerLog(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReopenContainerLog", reflect.TypeOf((*MockRuntimeServiceServer)(nil).ReopenContainerLog), arg0, arg1) -} - -// RunPodSandbox mocks base method. -func (m *MockRuntimeServiceServer) RunPodSandbox(arg0 context.Context, arg1 *v1alpha2.RunPodSandboxRequest) (*v1alpha2.RunPodSandboxResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RunPodSandbox", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.RunPodSandboxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RunPodSandbox indicates an expected call of RunPodSandbox. -func (mr *MockRuntimeServiceServerMockRecorder) RunPodSandbox(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunPodSandbox", reflect.TypeOf((*MockRuntimeServiceServer)(nil).RunPodSandbox), arg0, arg1) -} - -// StartContainer mocks base method. -func (m *MockRuntimeServiceServer) StartContainer(arg0 context.Context, arg1 *v1alpha2.StartContainerRequest) (*v1alpha2.StartContainerResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartContainer", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.StartContainerResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StartContainer indicates an expected call of StartContainer. -func (mr *MockRuntimeServiceServerMockRecorder) StartContainer(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartContainer", reflect.TypeOf((*MockRuntimeServiceServer)(nil).StartContainer), arg0, arg1) -} - -// Status mocks base method. -func (m *MockRuntimeServiceServer) Status(arg0 context.Context, arg1 *v1alpha2.StatusRequest) (*v1alpha2.StatusResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Status", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.StatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Status indicates an expected call of Status. 
-func (mr *MockRuntimeServiceServerMockRecorder) Status(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockRuntimeServiceServer)(nil).Status), arg0, arg1) -} - -// StopContainer mocks base method. -func (m *MockRuntimeServiceServer) StopContainer(arg0 context.Context, arg1 *v1alpha2.StopContainerRequest) (*v1alpha2.StopContainerResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopContainer", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.StopContainerResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StopContainer indicates an expected call of StopContainer. -func (mr *MockRuntimeServiceServerMockRecorder) StopContainer(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopContainer", reflect.TypeOf((*MockRuntimeServiceServer)(nil).StopContainer), arg0, arg1) -} - -// StopPodSandbox mocks base method. -func (m *MockRuntimeServiceServer) StopPodSandbox(arg0 context.Context, arg1 *v1alpha2.StopPodSandboxRequest) (*v1alpha2.StopPodSandboxResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopPodSandbox", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.StopPodSandboxResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StopPodSandbox indicates an expected call of StopPodSandbox. -func (mr *MockRuntimeServiceServerMockRecorder) StopPodSandbox(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopPodSandbox", reflect.TypeOf((*MockRuntimeServiceServer)(nil).StopPodSandbox), arg0, arg1) -} - -// UpdateContainerResources mocks base method. -func (m *MockRuntimeServiceServer) UpdateContainerResources(arg0 context.Context, arg1 *v1alpha2.UpdateContainerResourcesRequest) (*v1alpha2.UpdateContainerResourcesResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateContainerResources", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.UpdateContainerResourcesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateContainerResources indicates an expected call of UpdateContainerResources. -func (mr *MockRuntimeServiceServerMockRecorder) UpdateContainerResources(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateContainerResources", reflect.TypeOf((*MockRuntimeServiceServer)(nil).UpdateContainerResources), arg0, arg1) -} - -// UpdateRuntimeConfig mocks base method. -func (m *MockRuntimeServiceServer) UpdateRuntimeConfig(arg0 context.Context, arg1 *v1alpha2.UpdateRuntimeConfigRequest) (*v1alpha2.UpdateRuntimeConfigResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateRuntimeConfig", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.UpdateRuntimeConfigResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateRuntimeConfig indicates an expected call of UpdateRuntimeConfig. -func (mr *MockRuntimeServiceServerMockRecorder) UpdateRuntimeConfig(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRuntimeConfig", reflect.TypeOf((*MockRuntimeServiceServer)(nil).UpdateRuntimeConfig), arg0, arg1) -} - -// Version mocks base method. 
-func (m *MockRuntimeServiceServer) Version(arg0 context.Context, arg1 *v1alpha2.VersionRequest) (*v1alpha2.VersionResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Version", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.VersionResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Version indicates an expected call of Version. -func (mr *MockRuntimeServiceServerMockRecorder) Version(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRuntimeServiceServer)(nil).Version), arg0, arg1) -} - -// MockImageServiceClient is a mock of ImageServiceClient interface. -type MockImageServiceClient struct { - ctrl *gomock.Controller - recorder *MockImageServiceClientMockRecorder -} - -// MockImageServiceClientMockRecorder is the mock recorder for MockImageServiceClient. -type MockImageServiceClientMockRecorder struct { - mock *MockImageServiceClient -} - -// NewMockImageServiceClient creates a new mock instance. -func NewMockImageServiceClient(ctrl *gomock.Controller) *MockImageServiceClient { - mock := &MockImageServiceClient{ctrl: ctrl} - mock.recorder = &MockImageServiceClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockImageServiceClient) EXPECT() *MockImageServiceClientMockRecorder { - return m.recorder -} - -// ImageFsInfo mocks base method. -func (m *MockImageServiceClient) ImageFsInfo(ctx context.Context, in *v1alpha2.ImageFsInfoRequest, opts ...grpc.CallOption) (*v1alpha2.ImageFsInfoResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ImageFsInfo", varargs...) - ret0, _ := ret[0].(*v1alpha2.ImageFsInfoResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImageFsInfo indicates an expected call of ImageFsInfo. -func (mr *MockImageServiceClientMockRecorder) ImageFsInfo(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageFsInfo", reflect.TypeOf((*MockImageServiceClient)(nil).ImageFsInfo), varargs...) -} - -// ImageStatus mocks base method. -func (m *MockImageServiceClient) ImageStatus(ctx context.Context, in *v1alpha2.ImageStatusRequest, opts ...grpc.CallOption) (*v1alpha2.ImageStatusResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ImageStatus", varargs...) - ret0, _ := ret[0].(*v1alpha2.ImageStatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImageStatus indicates an expected call of ImageStatus. -func (mr *MockImageServiceClientMockRecorder) ImageStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStatus", reflect.TypeOf((*MockImageServiceClient)(nil).ImageStatus), varargs...) -} - -// ListImages mocks base method. -func (m *MockImageServiceClient) ListImages(ctx context.Context, in *v1alpha2.ListImagesRequest, opts ...grpc.CallOption) (*v1alpha2.ListImagesResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListImages", varargs...) 
- ret0, _ := ret[0].(*v1alpha2.ListImagesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListImages indicates an expected call of ListImages. -func (mr *MockImageServiceClientMockRecorder) ListImages(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockImageServiceClient)(nil).ListImages), varargs...) -} - -// PullImage mocks base method. -func (m *MockImageServiceClient) PullImage(ctx context.Context, in *v1alpha2.PullImageRequest, opts ...grpc.CallOption) (*v1alpha2.PullImageResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PullImage", varargs...) - ret0, _ := ret[0].(*v1alpha2.PullImageResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PullImage indicates an expected call of PullImage. -func (mr *MockImageServiceClientMockRecorder) PullImage(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockImageServiceClient)(nil).PullImage), varargs...) -} - -// RemoveImage mocks base method. -func (m *MockImageServiceClient) RemoveImage(ctx context.Context, in *v1alpha2.RemoveImageRequest, opts ...grpc.CallOption) (*v1alpha2.RemoveImageResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "RemoveImage", varargs...) - ret0, _ := ret[0].(*v1alpha2.RemoveImageResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RemoveImage indicates an expected call of RemoveImage. -func (mr *MockImageServiceClientMockRecorder) RemoveImage(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockImageServiceClient)(nil).RemoveImage), varargs...) -} - -// MockImageServiceServer is a mock of ImageServiceServer interface. -type MockImageServiceServer struct { - ctrl *gomock.Controller - recorder *MockImageServiceServerMockRecorder -} - -// MockImageServiceServerMockRecorder is the mock recorder for MockImageServiceServer. -type MockImageServiceServerMockRecorder struct { - mock *MockImageServiceServer -} - -// NewMockImageServiceServer creates a new mock instance. -func NewMockImageServiceServer(ctrl *gomock.Controller) *MockImageServiceServer { - mock := &MockImageServiceServer{ctrl: ctrl} - mock.recorder = &MockImageServiceServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockImageServiceServer) EXPECT() *MockImageServiceServerMockRecorder { - return m.recorder -} - -// ImageFsInfo mocks base method. -func (m *MockImageServiceServer) ImageFsInfo(arg0 context.Context, arg1 *v1alpha2.ImageFsInfoRequest) (*v1alpha2.ImageFsInfoResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImageFsInfo", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ImageFsInfoResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImageFsInfo indicates an expected call of ImageFsInfo. 
-func (mr *MockImageServiceServerMockRecorder) ImageFsInfo(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageFsInfo", reflect.TypeOf((*MockImageServiceServer)(nil).ImageFsInfo), arg0, arg1) -} - -// ImageStatus mocks base method. -func (m *MockImageServiceServer) ImageStatus(arg0 context.Context, arg1 *v1alpha2.ImageStatusRequest) (*v1alpha2.ImageStatusResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImageStatus", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ImageStatusResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImageStatus indicates an expected call of ImageStatus. -func (mr *MockImageServiceServerMockRecorder) ImageStatus(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStatus", reflect.TypeOf((*MockImageServiceServer)(nil).ImageStatus), arg0, arg1) -} - -// ListImages mocks base method. -func (m *MockImageServiceServer) ListImages(arg0 context.Context, arg1 *v1alpha2.ListImagesRequest) (*v1alpha2.ListImagesResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListImages", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.ListImagesResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListImages indicates an expected call of ListImages. -func (mr *MockImageServiceServerMockRecorder) ListImages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockImageServiceServer)(nil).ListImages), arg0, arg1) -} - -// PullImage mocks base method. -func (m *MockImageServiceServer) PullImage(arg0 context.Context, arg1 *v1alpha2.PullImageRequest) (*v1alpha2.PullImageResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PullImage", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.PullImageResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PullImage indicates an expected call of PullImage. -func (mr *MockImageServiceServerMockRecorder) PullImage(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockImageServiceServer)(nil).PullImage), arg0, arg1) -} - -// RemoveImage mocks base method. -func (m *MockImageServiceServer) RemoveImage(arg0 context.Context, arg1 *v1alpha2.RemoveImageRequest) (*v1alpha2.RemoveImageResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemoveImage", arg0, arg1) - ret0, _ := ret[0].(*v1alpha2.RemoveImageResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RemoveImage indicates an expected call of RemoveImage. 
-func (mr *MockImageServiceServerMockRecorder) RemoveImage(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockImageServiceServer)(nil).RemoveImage), arg0, arg1) -} diff --git a/pkg/koordlet/util/runtime/handler/pouch_runtime.go b/pkg/koordlet/util/runtime/handler/pouch_runtime.go index 30d707feb..593b026e3 100644 --- a/pkg/koordlet/util/runtime/handler/pouch_runtime.go +++ b/pkg/koordlet/util/runtime/handler/pouch_runtime.go @@ -24,7 +24,7 @@ import ( "strings" "time" - "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" + v1 "k8s.io/cri-api/pkg/apis/runtime/v1" "github.com/koordinator-sh/koordinator/pkg/koordlet/util/system" ) @@ -38,7 +38,7 @@ func GetPouchEndpoint() string { } type PouchRuntimeHandler struct { - runtimeServiceClient v1alpha2.RuntimeServiceClient + runtimeServiceClient v1.RuntimeServiceClient timeout time.Duration endpoint string } @@ -61,12 +61,12 @@ func NewPouchRuntimeHandler(endpoint string) (ContainerRuntimeHandler, error) { }, nil } -func (c *PouchRuntimeHandler) StopContainer(containerID string, timeout int64) error { +func (c *PouchRuntimeHandler) StopContainer(ctx context.Context, containerID string, timeout int64) error { if containerID == "" { return fmt.Errorf("containerID cannot be empty") } - request := &v1alpha2.StopContainerRequest{ + request := &v1.StopContainerRequest{ ContainerId: containerID, Timeout: timeout, } @@ -81,9 +81,9 @@ func (c *PouchRuntimeHandler) UpdateContainerResources(containerID string, opts } ctx, cancel := context.WithTimeout(context.Background(), c.timeout) defer cancel() - request := &v1alpha2.UpdateContainerResourcesRequest{ + request := &v1.UpdateContainerResourcesRequest{ ContainerId: containerID, - Linux: &v1alpha2.LinuxContainerResources{ + Linux: &v1.LinuxContainerResources{ CpuPeriod: opts.CPUPeriod, CpuQuota: opts.CPUQuota, CpuShares: opts.CPUShares, @@ -97,11 +97,11 @@ func (c *PouchRuntimeHandler) UpdateContainerResources(containerID string, opts return err } -func getRuntimeV1alpha2Client(endpoint string) (v1alpha2.RuntimeServiceClient, error) { +func getRuntimeV1alpha2Client(endpoint string) (v1.RuntimeServiceClient, error) { conn, err := getClientConnection(endpoint) if err != nil { return nil, fmt.Errorf("failed to connect: %v", err) } - runtimeClient := v1alpha2.NewRuntimeServiceClient(conn) + runtimeClient := v1.NewRuntimeServiceClient(conn) return runtimeClient, nil } diff --git a/pkg/koordlet/util/runtime/handler/pouch_runtime_test.go b/pkg/koordlet/util/runtime/handler/pouch_runtime_test.go index d03276207..7f6a360e8 100644 --- a/pkg/koordlet/util/runtime/handler/pouch_runtime_test.go +++ b/pkg/koordlet/util/runtime/handler/pouch_runtime_test.go @@ -27,7 +27,7 @@ import ( "github.com/stretchr/testify/assert" "google.golang.org/grpc" - mockv1alpha2_client "github.com/koordinator-sh/koordinator/pkg/koordlet/util/runtime/handler/mockv1alpha2client" + mockclient "github.com/koordinator-sh/koordinator/pkg/koordlet/util/runtime/handler/mockclient" "github.com/koordinator-sh/koordinator/pkg/koordlet/util/system" ) @@ -83,11 +83,11 @@ func TestPouchRuntimeHandler_StopContainer(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() - mockRuntimeClient := mockv1alpha2_client.NewMockRuntimeServiceClient(ctl) + mockRuntimeClient := mockclient.NewMockRuntimeServiceClient(ctl) mockRuntimeClient.EXPECT().StopContainer(gomock.Any(), gomock.Any()).Return(nil, tt.runtimeError) 
runtimeHandler := PouchRuntimeHandler{runtimeServiceClient: mockRuntimeClient, timeout: 1, endpoint: GetContainerdEndpoint()} - gotErr := runtimeHandler.StopContainer(tt.containerId, 1) + gotErr := runtimeHandler.StopContainer(context.TODO(), tt.containerId, 1) assert.Equal(t, gotErr != nil, tt.expectError) }) @@ -119,7 +119,7 @@ func TestPouchRuntimeHandler_UpdateContainerResources(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() - mockRuntimeClient := mockv1alpha2_client.NewMockRuntimeServiceClient(ctl) + mockRuntimeClient := mockclient.NewMockRuntimeServiceClient(ctl) mockRuntimeClient.EXPECT().UpdateContainerResources(gomock.Any(), gomock.Any()).Return(nil, tt.runtimeError) runtimeHandler := PouchRuntimeHandler{runtimeServiceClient: mockRuntimeClient, timeout: 1, endpoint: GetContainerdEndpoint()} diff --git a/pkg/koordlet/util/testutil/mock_utils.go b/pkg/koordlet/util/testutil/mock_utils.go index 2e6717935..8141b2af1 100644 --- a/pkg/koordlet/util/testutil/mock_utils.go +++ b/pkg/koordlet/util/testutil/mock_utils.go @@ -89,6 +89,10 @@ func MockTestPod(qosClass apiext.QoSClass, name string) *corev1.Pod { var PodsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} +var EvictionResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "evictions"} + +var EvictionKind = schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "Eviction"} + func BuildMockQueryResult(ctrl *gomock.Controller, querier *mock_metriccache.MockQuerier, factory *mock_metriccache.MockAggregateResultFactory, queryMeta metriccache.MetricMeta, value float64) { result := mock_metriccache.NewMockAggregateResult(ctrl) diff --git a/pkg/quota-controller/profile/profile_controller.go b/pkg/quota-controller/profile/profile_controller.go index b3d6ac698..27a68756b 100644 --- a/pkg/quota-controller/profile/profile_controller.go +++ b/pkg/quota-controller/profile/profile_controller.go @@ -41,10 +41,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" - schedv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" "github.com/koordinator-sh/koordinator/apis/quota/v1alpha1" + schedv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" utilclient "github.com/koordinator-sh/koordinator/pkg/util/client" ) diff --git a/pkg/quota-controller/profile/profile_controller_test.go b/pkg/quota-controller/profile/profile_controller_test.go index f1013cf78..4e7017a71 100644 --- a/pkg/quota-controller/profile/profile_controller_test.go +++ b/pkg/quota-controller/profile/profile_controller_test.go @@ -32,10 +32,10 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" - schedv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" quotav1alpha1 "github.com/koordinator-sh/koordinator/apis/quota/v1alpha1" + schedv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) func createResourceList(cpu, mem int64) corev1.ResourceList { diff --git a/pkg/runtimeproxy/server/cri/criserver.go b/pkg/runtimeproxy/server/cri/criserver.go index 849b62034..1dd601c9d 100644 --- 
a/pkg/runtimeproxy/server/cri/criserver.go +++ b/pkg/runtimeproxy/server/cri/criserver.go @@ -25,7 +25,6 @@ import ( "github.com/mwitkow/grpc-proxy/proxy" "google.golang.org/grpc" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - runtimeapialpha "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "k8s.io/klog/v2" "github.com/koordinator-sh/koordinator/cmd/koord-runtime-proxy/options" @@ -44,20 +43,16 @@ type RuntimeRequestInterceptor interface { InterceptRuntimeRequest(serviceType RuntimeServiceType, ctx context.Context, request interface{}, handler grpc.UnaryHandler, alphaRuntime bool) (interface{}, error) } +var _ runtimeapi.RuntimeServiceServer = &criServer{} + type criServer struct { RuntimeRequestInterceptor backendRuntimeServiceClient runtimeapi.RuntimeServiceClient } -type criAlphaServer struct { - RuntimeRequestInterceptor - backendRuntimeServiceClient runtimeapialpha.RuntimeServiceClient -} - type RuntimeManagerCriServer struct { hookDispatcher *dispatcher.RuntimeHookDispatcher criServer *criServer - criAlphaServer *criAlphaServer } func NewRuntimeManagerCriServer() *RuntimeManagerCriServer { @@ -96,9 +91,6 @@ func (c *RuntimeManagerCriServer) Run() error { if c.criServer != nil { runtimeapi.RegisterRuntimeServiceServer(grpcServer, c.criServer) } - if c.criAlphaServer != nil { - runtimeapialpha.RegisterRuntimeServiceServer(grpcServer, c.criAlphaServer) - } err = grpcServer.Serve(listener) return err } @@ -129,12 +121,12 @@ func (c *RuntimeManagerCriServer) InterceptRuntimeRequest(serviceType RuntimeSer resourceExecutor := resource_executor.NewRuntimeResourceExecutor(runtimeResourceType) var err error - if alphaRuntime { - request, err = alphaObjectToV1Object(request) - if err != nil { - return nil, err - } - } + //if alphaRuntime { + // request, err = alphaObjectToV1Object(request) + // if err != nil { + // return nil, err + // } + //} callHookOperation, err := resourceExecutor.ParseRequest(request) if err != nil { klog.Errorf("fail to parse request %v %v", request, err) @@ -157,22 +149,22 @@ func (c *RuntimeManagerCriServer) InterceptRuntimeRequest(serviceType RuntimeSer } } // call the backend runtime engine - if alphaRuntime { - request, err = v1ObjectToAlphaObject(request) - if err != nil { - return nil, err - } - } + //if alphaRuntime { + // request, err = v1ObjectToAlphaObject(request) + // if err != nil { + // return nil, err + // } + //} res, err := handler(ctx, request) - responseConverted := false + // responseConverted := false if err == nil { - if alphaRuntime { - responseConverted = true - res, err = alphaObjectToV1Object(res) - if err != nil { - return nil, err - } - } + //if alphaRuntime { + // responseConverted = true + // res, err = alphaObjectToV1Object(res) + // if err != nil { + // return nil, err + // } + //} klog.Infof("%v call containerd %v success", resourceExecutor.GetMetaInfo(), string(runtimeHookPath)) // store checkpoint info basing request only when response success if err := resourceExecutor.ResourceCheckPoint(res); err != nil { @@ -187,12 +179,12 @@ func (c *RuntimeManagerCriServer) InterceptRuntimeRequest(serviceType RuntimeSer // TODO the response c.hookDispatcher.Dispatch(ctx, runtimeHookPath, config.PostHook, resourceExecutor.GenerateHookRequest()) } - if responseConverted { - res, err = v1ObjectToAlphaObject(res) - if err != nil { - return nil, err - } - } + // if responseConverted { + //res, err = v1ObjectToAlphaObject(res) + //if err != nil { + // return nil, err + //} + // } return res, err } @@ -222,15 +214,8 @@ func (c *RuntimeManagerCriServer) 
 			backendRuntimeServiceClient: runtimeapi.NewRuntimeServiceClient(runtimeConn),
 		}
 	}
-	_, alphaErr := runtimeapialpha.NewRuntimeServiceClient(runtimeConn).Version(context.Background(), &runtimeapialpha.VersionRequest{})
-	if alphaErr == nil {
-		c.criAlphaServer = &criAlphaServer{
-			RuntimeRequestInterceptor: c,
-			backendRuntimeServiceClient: runtimeapialpha.NewRuntimeServiceClient(runtimeConn),
-		}
-	}
-	if c.criServer == nil && c.criAlphaServer == nil {
-		err = fmt.Errorf("%s, %s", v1Err.Error(), alphaErr.Error())
+	if c.criServer == nil {
+		err = fmt.Errorf("%s", v1Err.Error())
 		klog.Errorf("fail to create cri service %v", err)
 		return nil, err
 	}
@@ -246,15 +231,6 @@ func (c *RuntimeManagerCriServer) failOver() error {
 		if err != nil {
 			return err
 		}
-	} else {
-		podResponseAlpha, err := c.criAlphaServer.backendRuntimeServiceClient.ListPodSandbox(context.TODO(), &runtimeapialpha.ListPodSandboxRequest{})
-		if err != nil {
-			return err
-		}
-		err = convert(podResponseAlpha, podResponse)
-		if err != nil {
-			return err
-		}
 	}
 
 	for _, pod := range podResponse.Items {
@@ -271,15 +247,6 @@ func (c *RuntimeManagerCriServer) failOver() error {
 		if err != nil {
 			return err
 		}
-	} else {
-		containerResponseAlpha, err := c.criAlphaServer.ListContainers(context.TODO(), &runtimeapialpha.ListContainersRequest{})
-		if err != nil {
-			return err
-		}
-		err = convert(containerResponseAlpha, podResponse)
-		if err != nil {
-			return err
-		}
 	}
 	for _, container := range containerResponse.Containers {
 		containerExecutor := cri_resource_executor.NewContainerResourceExecutor()
diff --git a/pkg/runtimeproxy/server/cri/runtime.go b/pkg/runtimeproxy/server/cri/runtime.go
index dd220b882..f0d3bfa3b 100644
--- a/pkg/runtimeproxy/server/cri/runtime.go
+++ b/pkg/runtimeproxy/server/cri/runtime.go
@@ -19,8 +19,10 @@ package cri
 import (
 	"context"
 
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
-	runtimeapialpha "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
 )
 
 func (c *criServer) Version(ctx context.Context, req *runtimeapi.VersionRequest) (*runtimeapi.VersionResponse, error) {
@@ -165,144 +167,22 @@ func (c *criServer) PodSandboxStats(ctx context.Context, in *runtimeapi.PodSandb
 	return c.backendRuntimeServiceClient.PodSandboxStats(ctx, in)
 }
 
-func (c *criAlphaServer) Version(ctx context.Context, req *runtimeapialpha.VersionRequest) (*runtimeapialpha.VersionResponse, error) {
-	return c.backendRuntimeServiceClient.Version(ctx, req)
-}
-
-func (c *criAlphaServer) RunPodSandbox(ctx context.Context, req *runtimeapialpha.RunPodSandboxRequest) (*runtimeapialpha.RunPodSandboxResponse, error) {
-	rsp, err := c.InterceptRuntimeRequest(RunPodSandbox, ctx, req,
-		func(ctx context.Context, req interface{}) (interface{}, error) {
-			return c.backendRuntimeServiceClient.RunPodSandbox(ctx, req.(*runtimeapialpha.RunPodSandboxRequest))
-		}, true)
-	if err != nil {
-		return nil, err
-	}
-	return rsp.(*runtimeapialpha.RunPodSandboxResponse), err
-}
-func (c *criAlphaServer) StopPodSandbox(ctx context.Context, req *runtimeapialpha.StopPodSandboxRequest) (*runtimeapialpha.StopPodSandboxResponse, error) {
-	rsp, err := c.InterceptRuntimeRequest(StopPodSandbox, ctx, req,
-		func(ctx context.Context, req interface{}) (interface{}, error) {
-			return c.backendRuntimeServiceClient.StopPodSandbox(ctx, req.(*runtimeapialpha.StopPodSandboxRequest))
-		}, true)
-
-	if err != nil {
-		return nil, err
-	}
-	return rsp.(*runtimeapialpha.StopPodSandboxResponse), err
-}
-
-func (c *criAlphaServer) RemovePodSandbox(ctx context.Context, req *runtimeapialpha.RemovePodSandboxRequest) (*runtimeapialpha.RemovePodSandboxResponse, error) {
-	return c.backendRuntimeServiceClient.RemovePodSandbox(ctx, req)
-}
-
-func (c *criAlphaServer) PodSandboxStatus(ctx context.Context, req *runtimeapialpha.PodSandboxStatusRequest) (*runtimeapialpha.PodSandboxStatusResponse, error) {
-	return c.backendRuntimeServiceClient.PodSandboxStatus(ctx, req)
-}
-
-func (c *criAlphaServer) ListPodSandbox(ctx context.Context, req *runtimeapialpha.ListPodSandboxRequest) (*runtimeapialpha.ListPodSandboxResponse, error) {
-	return c.backendRuntimeServiceClient.ListPodSandbox(ctx, req)
+func (c *criServer) CheckpointContainer(ctx context.Context, req *runtimeapi.CheckpointContainerRequest) (*runtimeapi.CheckpointContainerResponse, error) {
+	return c.backendRuntimeServiceClient.CheckpointContainer(ctx, req)
 }
 
-func (c *criAlphaServer) CreateContainer(ctx context.Context, req *runtimeapialpha.CreateContainerRequest) (*runtimeapialpha.CreateContainerResponse, error) {
-	rsp, err := c.InterceptRuntimeRequest(CreateContainer, ctx, req,
-		func(ctx context.Context, req interface{}) (interface{}, error) {
-			return c.backendRuntimeServiceClient.CreateContainer(ctx, req.(*runtimeapialpha.CreateContainerRequest))
-		}, true)
-	if err != nil {
-		return nil, err
-	}
-	return rsp.(*runtimeapialpha.CreateContainerResponse), err
-}
-
-func (c *criAlphaServer) StartContainer(ctx context.Context, req *runtimeapialpha.StartContainerRequest) (*runtimeapialpha.StartContainerResponse, error) {
-	rsp, err := c.InterceptRuntimeRequest(StartContainer, ctx, req,
-		func(ctx context.Context, req interface{}) (interface{}, error) {
-			return c.backendRuntimeServiceClient.StartContainer(ctx, req.(*runtimeapialpha.StartContainerRequest))
-		}, true)
-	if err != nil {
-		return nil, err
-	}
-	return rsp.(*runtimeapialpha.StartContainerResponse), err
+func (c *criServer) GetContainerEvents(req *runtimeapi.GetEventsRequest, server runtimeapi.RuntimeService_GetContainerEventsServer) error {
+	return status.Errorf(codes.Unimplemented, "method GetContainerEvents not implemented")
 }
 
-func (c *criAlphaServer) StopContainer(ctx context.Context, req *runtimeapialpha.StopContainerRequest) (*runtimeapialpha.StopContainerResponse, error) {
-	rsp, err := c.InterceptRuntimeRequest(StopContainer, ctx, req,
-		func(ctx context.Context, req interface{}) (interface{}, error) {
-			return c.backendRuntimeServiceClient.StopContainer(ctx, req.(*runtimeapialpha.StopContainerRequest))
-		}, true)
-	if err != nil {
-		return nil, err
-	}
-	return rsp.(*runtimeapialpha.StopContainerResponse), err
+func (c *criServer) ListMetricDescriptors(ctx context.Context, req *runtimeapi.ListMetricDescriptorsRequest) (*runtimeapi.ListMetricDescriptorsResponse, error) {
+	return c.backendRuntimeServiceClient.ListMetricDescriptors(ctx, req)
 }
 
-func (c *criAlphaServer) RemoveContainer(ctx context.Context, req *runtimeapialpha.RemoveContainerRequest) (*runtimeapialpha.RemoveContainerResponse, error) {
-	rsp, err := c.InterceptRuntimeRequest(RemoveContainer, ctx, req,
-		func(ctx context.Context, req interface{}) (interface{}, error) {
-			return c.backendRuntimeServiceClient.RemoveContainer(ctx, req.(*runtimeapialpha.RemoveContainerRequest))
-		}, true)
-	if err != nil {
-		return nil, err
-	}
-	return rsp.(*runtimeapialpha.RemoveContainerResponse), err
-}
-
-func (c *criAlphaServer) ContainerStatus(ctx context.Context, req *runtimeapialpha.ContainerStatusRequest) (*runtimeapialpha.ContainerStatusResponse, error) {
-	return c.backendRuntimeServiceClient.ContainerStatus(ctx, req)
-}
-
-func (c *criAlphaServer) ListContainers(ctx context.Context, req *runtimeapialpha.ListContainersRequest) (*runtimeapialpha.ListContainersResponse, error) {
-	return c.backendRuntimeServiceClient.ListContainers(ctx, req)
-}
-
-func (c *criAlphaServer) UpdateContainerResources(ctx context.Context, req *runtimeapialpha.UpdateContainerResourcesRequest) (*runtimeapialpha.UpdateContainerResourcesResponse, error) {
-	rsp, err := c.InterceptRuntimeRequest(UpdateContainerResources, ctx, req,
-		func(ctx context.Context, req interface{}) (interface{}, error) {
-			return c.backendRuntimeServiceClient.UpdateContainerResources(ctx, req.(*runtimeapialpha.UpdateContainerResourcesRequest))
-		}, true)
-	if err != nil {
-		return nil, err
-	}
-	return rsp.(*runtimeapialpha.UpdateContainerResourcesResponse), err
-}
-
-func (c *criAlphaServer) ContainerStats(ctx context.Context, req *runtimeapialpha.ContainerStatsRequest) (*runtimeapialpha.ContainerStatsResponse, error) {
-	return c.backendRuntimeServiceClient.ContainerStats(ctx, req)
-}
-func (c *criAlphaServer) ListContainerStats(ctx context.Context, req *runtimeapialpha.ListContainerStatsRequest) (*runtimeapialpha.ListContainerStatsResponse, error) {
-	return c.backendRuntimeServiceClient.ListContainerStats(ctx, req)
-}
-
-func (c *criAlphaServer) Status(ctx context.Context, req *runtimeapialpha.StatusRequest) (*runtimeapialpha.StatusResponse, error) {
-	return c.backendRuntimeServiceClient.Status(ctx, req)
-}
-
-func (c *criAlphaServer) ReopenContainerLog(ctx context.Context, in *runtimeapialpha.ReopenContainerLogRequest) (*runtimeapialpha.ReopenContainerLogResponse, error) {
-	return c.backendRuntimeServiceClient.ReopenContainerLog(ctx, in)
-}
-func (c *criAlphaServer) ExecSync(ctx context.Context, in *runtimeapialpha.ExecSyncRequest) (*runtimeapialpha.ExecSyncResponse, error) {
-	return c.backendRuntimeServiceClient.ExecSync(ctx, in)
-}
-func (c *criAlphaServer) Exec(ctx context.Context, in *runtimeapialpha.ExecRequest) (*runtimeapialpha.ExecResponse, error) {
-	return c.backendRuntimeServiceClient.Exec(ctx, in)
-}
-
-func (c *criAlphaServer) Attach(ctx context.Context, in *runtimeapialpha.AttachRequest) (*runtimeapialpha.AttachResponse, error) {
-	return c.backendRuntimeServiceClient.Attach(ctx, in)
-}
-
-func (c *criAlphaServer) PortForward(ctx context.Context, in *runtimeapialpha.PortForwardRequest) (*runtimeapialpha.PortForwardResponse, error) {
-	return c.backendRuntimeServiceClient.PortForward(ctx, in)
-}
-
-func (c *criAlphaServer) UpdateRuntimeConfig(ctx context.Context, in *runtimeapialpha.UpdateRuntimeConfigRequest) (*runtimeapialpha.UpdateRuntimeConfigResponse, error) {
-	return c.backendRuntimeServiceClient.UpdateRuntimeConfig(ctx, in)
-}
-
-func (c *criAlphaServer) PodSandboxStats(ctx context.Context, in *runtimeapialpha.PodSandboxStatsRequest) (*runtimeapialpha.PodSandboxStatsResponse, error) {
-	return c.backendRuntimeServiceClient.PodSandboxStats(ctx, in)
+func (c *criServer) ListPodSandboxMetrics(ctx context.Context, req *runtimeapi.ListPodSandboxMetricsRequest) (*runtimeapi.ListPodSandboxMetricsResponse, error) {
+	return c.backendRuntimeServiceClient.ListPodSandboxMetrics(ctx, req)
 }
 
-func (c *criAlphaServer) ListPodSandboxStats(ctx context.Context, in *runtimeapialpha.ListPodSandboxStatsRequest) (*runtimeapialpha.ListPodSandboxStatsResponse, error) {
-	return c.backendRuntimeServiceClient.ListPodSandboxStats(ctx, in)
+func (c *criServer) RuntimeConfig(ctx context.Context, req *runtimeapi.RuntimeConfigRequest) (*runtimeapi.RuntimeConfigResponse, error) {
+	return c.backendRuntimeServiceClient.RuntimeConfig(ctx, req)
 }
diff --git a/pkg/runtimeproxy/server/cri/utils.go b/pkg/runtimeproxy/server/cri/utils.go
index f0a959643..b46026ed0 100644
--- a/pkg/runtimeproxy/server/cri/utils.go
+++ b/pkg/runtimeproxy/server/cri/utils.go
@@ -16,13 +16,6 @@ limitations under the License.
 package cri
 
-import (
-	"errors"
-
-	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
-	runtimeapialpha "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
-)
-
 type ServiceType int
 type RuntimeServiceType int
 type ImageServiceType int
@@ -42,219 +35,220 @@ const (
 	UpdateContainerResources
 )
 
-func convert(
-	input interface{ Marshal() ([]byte, error) },
-	output interface{ Unmarshal(_ []byte) error },
-) error {
-	p, err := input.Marshal()
-	if err != nil {
-		return err
-	}
-
-	if err = output.Unmarshal(p); err != nil {
-		return err
-	}
-	return nil
-}
-
-func alphaObjectToV1Object(obj interface{}) (interface{}, error) {
-	var v1Object interface{ Unmarshal(_ []byte) error }
-	switch obj.(type) {
-	case *runtimeapialpha.VersionRequest:
-		v1Object = &runtimeapi.VersionRequest{}
-	case *runtimeapialpha.VersionResponse:
-		v1Object = &runtimeapi.VersionResponse{}
-	case *runtimeapialpha.RunPodSandboxRequest:
-		v1Object = &runtimeapi.RunPodSandboxRequest{}
-	case *runtimeapialpha.RunPodSandboxResponse:
-		v1Object = &runtimeapi.RunPodSandboxResponse{}
-	case *runtimeapialpha.StopPodSandboxRequest:
-		v1Object = &runtimeapi.StopPodSandboxRequest{}
-	case *runtimeapialpha.StopPodSandboxResponse:
-		v1Object = &runtimeapi.StopPodSandboxResponse{}
-	case *runtimeapialpha.RemovePodSandboxRequest:
-		v1Object = &runtimeapi.RemovePodSandboxRequest{}
-	case *runtimeapialpha.RemovePodSandboxResponse:
-		v1Object = &runtimeapi.RemovePodSandboxResponse{}
-	case *runtimeapialpha.PodSandboxStatusRequest:
-		v1Object = &runtimeapi.PodSandboxStatusRequest{}
-	case *runtimeapialpha.PodSandboxStatusResponse:
-		v1Object = &runtimeapi.PodSandboxStatusResponse{}
-	case *runtimeapialpha.ListPodSandboxRequest:
-		v1Object = &runtimeapi.ListPodSandboxRequest{}
-	case *runtimeapialpha.ListPodSandboxResponse:
-		v1Object = &runtimeapi.ListPodSandboxResponse{}
-	case *runtimeapialpha.CreateContainerRequest:
-		v1Object = &runtimeapi.CreateContainerRequest{}
-	case *runtimeapialpha.CreateContainerResponse:
-		v1Object = &runtimeapi.CreateContainerResponse{}
-	case *runtimeapialpha.StartContainerRequest:
-		v1Object = &runtimeapi.StartContainerRequest{}
-	case *runtimeapialpha.StartContainerResponse:
-		v1Object = &runtimeapi.StartContainerResponse{}
-	case *runtimeapialpha.StopContainerRequest:
-		v1Object = &runtimeapi.StopContainerRequest{}
-	case *runtimeapialpha.StopContainerResponse:
-		v1Object = &runtimeapi.StopContainerResponse{}
-	case *runtimeapialpha.RemoveContainerRequest:
-		v1Object = &runtimeapi.RemoveContainerRequest{}
-	case *runtimeapialpha.RemoveContainerResponse:
-		v1Object = &runtimeapi.RemoveContainerResponse{}
-	case *runtimeapialpha.ContainerStatusRequest:
-		v1Object = &runtimeapi.ContainerStatusRequest{}
-	case *runtimeapialpha.ContainerStatusResponse:
-		v1Object = &runtimeapi.ContainerStatusResponse{}
-	case *runtimeapialpha.ListContainersRequest:
-		v1Object = &runtimeapi.ListContainersRequest{}
-	case *runtimeapialpha.ListContainersResponse:
-		v1Object = &runtimeapi.ListContainersResponse{}
-	case *runtimeapialpha.UpdateContainerResourcesRequest:
-		v1Object =
&runtimeapi.UpdateContainerResourcesRequest{} - case *runtimeapialpha.UpdateContainerResourcesResponse: - v1Object = &runtimeapi.UpdateContainerResourcesResponse{} - case *runtimeapialpha.ContainerStatsRequest: - v1Object = &runtimeapi.ContainerStatsRequest{} - case *runtimeapialpha.ContainerStatsResponse: - v1Object = &runtimeapi.ContainerStatsResponse{} - case *runtimeapialpha.ListContainerStatsRequest: - v1Object = &runtimeapi.ListContainerStatsRequest{} - case *runtimeapialpha.ListContainerStatsResponse: - v1Object = &runtimeapi.ListContainerStatsResponse{} - case *runtimeapialpha.StatusRequest: - v1Object = &runtimeapi.StatusRequest{} - case *runtimeapialpha.StatusResponse: - v1Object = &runtimeapi.StatusResponse{} - case *runtimeapialpha.ReopenContainerLogRequest: - v1Object = &runtimeapi.ReopenContainerLogRequest{} - case *runtimeapialpha.ReopenContainerLogResponse: - v1Object = &runtimeapi.ReopenContainerLogResponse{} - case *runtimeapialpha.ExecSyncRequest: - v1Object = &runtimeapi.ExecSyncRequest{} - case *runtimeapialpha.ExecSyncResponse: - v1Object = &runtimeapi.ExecSyncResponse{} - case *runtimeapialpha.ExecRequest: - v1Object = &runtimeapi.ExecRequest{} - case *runtimeapialpha.ExecResponse: - v1Object = &runtimeapi.ExecResponse{} - case *runtimeapialpha.AttachRequest: - v1Object = &runtimeapi.AttachRequest{} - case *runtimeapialpha.AttachResponse: - v1Object = &runtimeapi.AttachResponse{} - case *runtimeapialpha.PortForwardRequest: - v1Object = &runtimeapi.PortForwardRequest{} - case *runtimeapialpha.PortForwardResponse: - v1Object = &runtimeapi.PortForwardResponse{} - case *runtimeapialpha.UpdateRuntimeConfigRequest: - v1Object = &runtimeapi.UpdateRuntimeConfigRequest{} - case *runtimeapialpha.UpdateRuntimeConfigResponse: - v1Object = &runtimeapi.UpdateRuntimeConfigResponse{} - default: - return nil, errors.New("invalid v1alpha2 cri api object") - } - err := convert(obj.(interface{ Marshal() ([]byte, error) }), v1Object) - if err != nil { - return nil, err - } - return v1Object, nil -} +//func convert( +// input interface{ Marshal() ([]byte, error) }, +// output interface{ Unmarshal(_ []byte) error }, +//) error { +// p, err := input.Marshal() +// if err != nil { +// return err +// } +// +// if err = output.Unmarshal(p); err != nil { +// return err +// } +// return nil +//} -func v1ObjectToAlphaObject(obj interface{}) (interface{}, error) { - var alphaObject interface{ Unmarshal(_ []byte) error } - switch obj.(type) { - case *runtimeapi.VersionRequest: - alphaObject = &runtimeapialpha.VersionRequest{} - case *runtimeapi.VersionResponse: - alphaObject = &runtimeapialpha.VersionResponse{} - case *runtimeapi.RunPodSandboxRequest: - alphaObject = &runtimeapialpha.RunPodSandboxRequest{} - case *runtimeapi.RunPodSandboxResponse: - alphaObject = &runtimeapialpha.RunPodSandboxResponse{} - case *runtimeapi.StopPodSandboxRequest: - alphaObject = &runtimeapialpha.StopPodSandboxRequest{} - case *runtimeapi.StopPodSandboxResponse: - alphaObject = &runtimeapialpha.StopPodSandboxResponse{} - case *runtimeapi.RemovePodSandboxRequest: - alphaObject = &runtimeapialpha.RemovePodSandboxRequest{} - case *runtimeapi.RemovePodSandboxResponse: - alphaObject = &runtimeapialpha.RemovePodSandboxResponse{} - case *runtimeapi.PodSandboxStatusRequest: - alphaObject = &runtimeapialpha.PodSandboxStatusRequest{} - case *runtimeapi.PodSandboxStatusResponse: - alphaObject = &runtimeapialpha.PodSandboxStatusResponse{} - case *runtimeapi.ListPodSandboxRequest: - alphaObject = 
&runtimeapialpha.ListPodSandboxRequest{} - case *runtimeapi.ListPodSandboxResponse: - alphaObject = &runtimeapialpha.ListPodSandboxResponse{} - case *runtimeapi.CreateContainerRequest: - alphaObject = &runtimeapialpha.CreateContainerRequest{} - case *runtimeapi.CreateContainerResponse: - alphaObject = &runtimeapialpha.CreateContainerResponse{} - case *runtimeapi.StartContainerRequest: - alphaObject = &runtimeapialpha.StartContainerRequest{} - case *runtimeapi.StartContainerResponse: - alphaObject = &runtimeapialpha.StartContainerResponse{} - case *runtimeapi.StopContainerRequest: - alphaObject = &runtimeapialpha.StopContainerRequest{} - case *runtimeapi.StopContainerResponse: - alphaObject = &runtimeapialpha.StopContainerResponse{} - case *runtimeapi.RemoveContainerRequest: - alphaObject = &runtimeapialpha.RemoveContainerRequest{} - case *runtimeapi.RemoveContainerResponse: - alphaObject = &runtimeapialpha.RemoveContainerResponse{} - case *runtimeapi.ContainerStatusRequest: - alphaObject = &runtimeapialpha.ContainerStatusRequest{} - case *runtimeapi.ContainerStatusResponse: - alphaObject = &runtimeapialpha.ContainerStatusResponse{} - case *runtimeapi.ListContainersRequest: - alphaObject = &runtimeapialpha.ListContainersRequest{} - case *runtimeapi.ListContainersResponse: - alphaObject = &runtimeapialpha.ListContainersResponse{} - case *runtimeapi.UpdateContainerResourcesRequest: - alphaObject = &runtimeapialpha.UpdateContainerResourcesRequest{} - case *runtimeapi.UpdateContainerResourcesResponse: - alphaObject = &runtimeapialpha.UpdateContainerResourcesResponse{} - case *runtimeapi.ContainerStatsRequest: - alphaObject = &runtimeapialpha.ContainerStatsRequest{} - case *runtimeapi.ContainerStatsResponse: - alphaObject = &runtimeapialpha.ContainerStatsResponse{} - case *runtimeapi.ListContainerStatsRequest: - alphaObject = &runtimeapialpha.ListContainerStatsRequest{} - case *runtimeapi.ListContainerStatsResponse: - alphaObject = &runtimeapialpha.ListContainerStatsResponse{} - case *runtimeapi.StatusRequest: - alphaObject = &runtimeapialpha.StatusRequest{} - case *runtimeapi.StatusResponse: - alphaObject = &runtimeapialpha.StatusResponse{} - case *runtimeapi.ReopenContainerLogRequest: - alphaObject = &runtimeapialpha.ReopenContainerLogRequest{} - case *runtimeapi.ReopenContainerLogResponse: - alphaObject = &runtimeapialpha.ReopenContainerLogResponse{} - case *runtimeapi.ExecSyncRequest: - alphaObject = &runtimeapialpha.ExecSyncRequest{} - case *runtimeapi.ExecSyncResponse: - alphaObject = &runtimeapialpha.ExecSyncResponse{} - case *runtimeapi.ExecRequest: - alphaObject = &runtimeapialpha.ExecRequest{} - case *runtimeapi.ExecResponse: - alphaObject = &runtimeapialpha.ExecResponse{} - case *runtimeapi.AttachRequest: - alphaObject = &runtimeapialpha.AttachRequest{} - case *runtimeapi.AttachResponse: - alphaObject = &runtimeapialpha.AttachResponse{} - case *runtimeapi.PortForwardRequest: - alphaObject = &runtimeapialpha.PortForwardRequest{} - case *runtimeapi.PortForwardResponse: - alphaObject = &runtimeapialpha.PortForwardResponse{} - case *runtimeapi.UpdateRuntimeConfigRequest: - alphaObject = &runtimeapialpha.UpdateRuntimeConfigRequest{} - case *runtimeapi.UpdateRuntimeConfigResponse: - alphaObject = &runtimeapialpha.UpdateRuntimeConfigResponse{} - default: - return nil, errors.New("invalid v1alpha2 cri api object") - } - err := convert(obj.(interface{ Marshal() ([]byte, error) }), alphaObject) - if err != nil { - return nil, err - } - return alphaObject, nil -} +// +//func 
alphaObjectToV1Object(obj interface{}) (interface{}, error) { +// var v1Object interface{ Unmarshal(_ []byte) error } +// switch obj.(type) { +// case *runtimeapialpha.VersionRequest: +// v1Object = &runtimeapi.VersionRequest{} +// case *runtimeapialpha.VersionResponse: +// v1Object = &runtimeapi.VersionResponse{} +// case *runtimeapialpha.RunPodSandboxRequest: +// v1Object = &runtimeapi.RunPodSandboxRequest{} +// case *runtimeapialpha.RunPodSandboxResponse: +// v1Object = &runtimeapi.RunPodSandboxResponse{} +// case *runtimeapialpha.StopPodSandboxRequest: +// v1Object = &runtimeapi.StopPodSandboxRequest{} +// case *runtimeapialpha.StopPodSandboxResponse: +// v1Object = &runtimeapi.StopPodSandboxResponse{} +// case *runtimeapialpha.RemovePodSandboxRequest: +// v1Object = &runtimeapi.RemovePodSandboxRequest{} +// case *runtimeapialpha.RemovePodSandboxResponse: +// v1Object = &runtimeapi.RemovePodSandboxResponse{} +// case *runtimeapialpha.PodSandboxStatusRequest: +// v1Object = &runtimeapi.PodSandboxStatusRequest{} +// case *runtimeapialpha.PodSandboxStatusResponse: +// v1Object = &runtimeapi.PodSandboxStatusResponse{} +// case *runtimeapialpha.ListPodSandboxRequest: +// v1Object = &runtimeapi.ListPodSandboxRequest{} +// case *runtimeapialpha.ListPodSandboxResponse: +// v1Object = &runtimeapi.ListPodSandboxResponse{} +// case *runtimeapialpha.CreateContainerRequest: +// v1Object = &runtimeapi.CreateContainerRequest{} +// case *runtimeapialpha.CreateContainerResponse: +// v1Object = &runtimeapi.CreateContainerResponse{} +// case *runtimeapialpha.StartContainerRequest: +// v1Object = &runtimeapi.StartContainerRequest{} +// case *runtimeapialpha.StartContainerResponse: +// v1Object = &runtimeapi.StartContainerResponse{} +// case *runtimeapialpha.StopContainerRequest: +// v1Object = &runtimeapi.StopContainerRequest{} +// case *runtimeapialpha.StopContainerResponse: +// v1Object = &runtimeapi.StopContainerResponse{} +// case *runtimeapialpha.RemoveContainerRequest: +// v1Object = &runtimeapi.RemoveContainerRequest{} +// case *runtimeapialpha.RemoveContainerResponse: +// v1Object = &runtimeapi.RemoveContainerResponse{} +// case *runtimeapialpha.ContainerStatusRequest: +// v1Object = &runtimeapi.ContainerStatusRequest{} +// case *runtimeapialpha.ContainerStatusResponse: +// v1Object = &runtimeapi.ContainerStatusResponse{} +// case *runtimeapialpha.ListContainersRequest: +// v1Object = &runtimeapi.ListContainersRequest{} +// case *runtimeapialpha.ListContainersResponse: +// v1Object = &runtimeapi.ListContainersResponse{} +// case *runtimeapialpha.UpdateContainerResourcesRequest: +// v1Object = &runtimeapi.UpdateContainerResourcesRequest{} +// case *runtimeapialpha.UpdateContainerResourcesResponse: +// v1Object = &runtimeapi.UpdateContainerResourcesResponse{} +// case *runtimeapialpha.ContainerStatsRequest: +// v1Object = &runtimeapi.ContainerStatsRequest{} +// case *runtimeapialpha.ContainerStatsResponse: +// v1Object = &runtimeapi.ContainerStatsResponse{} +// case *runtimeapialpha.ListContainerStatsRequest: +// v1Object = &runtimeapi.ListContainerStatsRequest{} +// case *runtimeapialpha.ListContainerStatsResponse: +// v1Object = &runtimeapi.ListContainerStatsResponse{} +// case *runtimeapialpha.StatusRequest: +// v1Object = &runtimeapi.StatusRequest{} +// case *runtimeapialpha.StatusResponse: +// v1Object = &runtimeapi.StatusResponse{} +// case *runtimeapialpha.ReopenContainerLogRequest: +// v1Object = &runtimeapi.ReopenContainerLogRequest{} +// case *runtimeapialpha.ReopenContainerLogResponse: 
+// v1Object = &runtimeapi.ReopenContainerLogResponse{} +// case *runtimeapialpha.ExecSyncRequest: +// v1Object = &runtimeapi.ExecSyncRequest{} +// case *runtimeapialpha.ExecSyncResponse: +// v1Object = &runtimeapi.ExecSyncResponse{} +// case *runtimeapialpha.ExecRequest: +// v1Object = &runtimeapi.ExecRequest{} +// case *runtimeapialpha.ExecResponse: +// v1Object = &runtimeapi.ExecResponse{} +// case *runtimeapialpha.AttachRequest: +// v1Object = &runtimeapi.AttachRequest{} +// case *runtimeapialpha.AttachResponse: +// v1Object = &runtimeapi.AttachResponse{} +// case *runtimeapialpha.PortForwardRequest: +// v1Object = &runtimeapi.PortForwardRequest{} +// case *runtimeapialpha.PortForwardResponse: +// v1Object = &runtimeapi.PortForwardResponse{} +// case *runtimeapialpha.UpdateRuntimeConfigRequest: +// v1Object = &runtimeapi.UpdateRuntimeConfigRequest{} +// case *runtimeapialpha.UpdateRuntimeConfigResponse: +// v1Object = &runtimeapi.UpdateRuntimeConfigResponse{} +// default: +// return nil, errors.New("invalid v1alpha2 cri api object") +// } +// err := convert(obj.(interface{ Marshal() ([]byte, error) }), v1Object) +// if err != nil { +// return nil, err +// } +// return v1Object, nil +//} +// +//func v1ObjectToAlphaObject(obj interface{}) (interface{}, error) { +// var alphaObject interface{ Unmarshal(_ []byte) error } +// switch obj.(type) { +// case *runtimeapi.VersionRequest: +// alphaObject = &runtimeapialpha.VersionRequest{} +// case *runtimeapi.VersionResponse: +// alphaObject = &runtimeapialpha.VersionResponse{} +// case *runtimeapi.RunPodSandboxRequest: +// alphaObject = &runtimeapialpha.RunPodSandboxRequest{} +// case *runtimeapi.RunPodSandboxResponse: +// alphaObject = &runtimeapialpha.RunPodSandboxResponse{} +// case *runtimeapi.StopPodSandboxRequest: +// alphaObject = &runtimeapialpha.StopPodSandboxRequest{} +// case *runtimeapi.StopPodSandboxResponse: +// alphaObject = &runtimeapialpha.StopPodSandboxResponse{} +// case *runtimeapi.RemovePodSandboxRequest: +// alphaObject = &runtimeapialpha.RemovePodSandboxRequest{} +// case *runtimeapi.RemovePodSandboxResponse: +// alphaObject = &runtimeapialpha.RemovePodSandboxResponse{} +// case *runtimeapi.PodSandboxStatusRequest: +// alphaObject = &runtimeapialpha.PodSandboxStatusRequest{} +// case *runtimeapi.PodSandboxStatusResponse: +// alphaObject = &runtimeapialpha.PodSandboxStatusResponse{} +// case *runtimeapi.ListPodSandboxRequest: +// alphaObject = &runtimeapialpha.ListPodSandboxRequest{} +// case *runtimeapi.ListPodSandboxResponse: +// alphaObject = &runtimeapialpha.ListPodSandboxResponse{} +// case *runtimeapi.CreateContainerRequest: +// alphaObject = &runtimeapialpha.CreateContainerRequest{} +// case *runtimeapi.CreateContainerResponse: +// alphaObject = &runtimeapialpha.CreateContainerResponse{} +// case *runtimeapi.StartContainerRequest: +// alphaObject = &runtimeapialpha.StartContainerRequest{} +// case *runtimeapi.StartContainerResponse: +// alphaObject = &runtimeapialpha.StartContainerResponse{} +// case *runtimeapi.StopContainerRequest: +// alphaObject = &runtimeapialpha.StopContainerRequest{} +// case *runtimeapi.StopContainerResponse: +// alphaObject = &runtimeapialpha.StopContainerResponse{} +// case *runtimeapi.RemoveContainerRequest: +// alphaObject = &runtimeapialpha.RemoveContainerRequest{} +// case *runtimeapi.RemoveContainerResponse: +// alphaObject = &runtimeapialpha.RemoveContainerResponse{} +// case *runtimeapi.ContainerStatusRequest: +// alphaObject = &runtimeapialpha.ContainerStatusRequest{} +// case 
*runtimeapi.ContainerStatusResponse: +// alphaObject = &runtimeapialpha.ContainerStatusResponse{} +// case *runtimeapi.ListContainersRequest: +// alphaObject = &runtimeapialpha.ListContainersRequest{} +// case *runtimeapi.ListContainersResponse: +// alphaObject = &runtimeapialpha.ListContainersResponse{} +// case *runtimeapi.UpdateContainerResourcesRequest: +// alphaObject = &runtimeapialpha.UpdateContainerResourcesRequest{} +// case *runtimeapi.UpdateContainerResourcesResponse: +// alphaObject = &runtimeapialpha.UpdateContainerResourcesResponse{} +// case *runtimeapi.ContainerStatsRequest: +// alphaObject = &runtimeapialpha.ContainerStatsRequest{} +// case *runtimeapi.ContainerStatsResponse: +// alphaObject = &runtimeapialpha.ContainerStatsResponse{} +// case *runtimeapi.ListContainerStatsRequest: +// alphaObject = &runtimeapialpha.ListContainerStatsRequest{} +// case *runtimeapi.ListContainerStatsResponse: +// alphaObject = &runtimeapialpha.ListContainerStatsResponse{} +// case *runtimeapi.StatusRequest: +// alphaObject = &runtimeapialpha.StatusRequest{} +// case *runtimeapi.StatusResponse: +// alphaObject = &runtimeapialpha.StatusResponse{} +// case *runtimeapi.ReopenContainerLogRequest: +// alphaObject = &runtimeapialpha.ReopenContainerLogRequest{} +// case *runtimeapi.ReopenContainerLogResponse: +// alphaObject = &runtimeapialpha.ReopenContainerLogResponse{} +// case *runtimeapi.ExecSyncRequest: +// alphaObject = &runtimeapialpha.ExecSyncRequest{} +// case *runtimeapi.ExecSyncResponse: +// alphaObject = &runtimeapialpha.ExecSyncResponse{} +// case *runtimeapi.ExecRequest: +// alphaObject = &runtimeapialpha.ExecRequest{} +// case *runtimeapi.ExecResponse: +// alphaObject = &runtimeapialpha.ExecResponse{} +// case *runtimeapi.AttachRequest: +// alphaObject = &runtimeapialpha.AttachRequest{} +// case *runtimeapi.AttachResponse: +// alphaObject = &runtimeapialpha.AttachResponse{} +// case *runtimeapi.PortForwardRequest: +// alphaObject = &runtimeapialpha.PortForwardRequest{} +// case *runtimeapi.PortForwardResponse: +// alphaObject = &runtimeapialpha.PortForwardResponse{} +// case *runtimeapi.UpdateRuntimeConfigRequest: +// alphaObject = &runtimeapialpha.UpdateRuntimeConfigRequest{} +// case *runtimeapi.UpdateRuntimeConfigResponse: +// alphaObject = &runtimeapialpha.UpdateRuntimeConfigResponse{} +// default: +// return nil, errors.New("invalid v1alpha2 cri api object") +// } +// err := convert(obj.(interface{ Marshal() ([]byte, error) }), alphaObject) +// if err != nil { +// return nil, err +// } +// return alphaObject, nil +//} diff --git a/pkg/scheduler/apis/config/scheme/scheme.go b/pkg/scheduler/apis/config/scheme/scheme.go index f1ef19a4d..274e29b34 100644 --- a/pkg/scheduler/apis/config/scheme/scheme.go +++ b/pkg/scheduler/apis/config/scheme/scheme.go @@ -23,7 +23,7 @@ import ( kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" - "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta2" + "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3" ) var ( @@ -41,5 +41,5 @@ func init() { // AddToScheme builds the kubescheduler scheme using all known versions of the kubescheduler api. 
func AddToScheme(scheme *runtime.Scheme) { utilruntime.Must(config.AddToScheme(scheme)) - utilruntime.Must(v1beta2.AddToScheme(scheme)) + utilruntime.Must(v1beta3.AddToScheme(scheme)) } diff --git a/pkg/scheduler/apis/config/v1beta2/conversion_plugin.go b/pkg/scheduler/apis/config/v1beta3/conversion_plugin.go similarity index 86% rename from pkg/scheduler/apis/config/v1beta2/conversion_plugin.go rename to pkg/scheduler/apis/config/v1beta3/conversion_plugin.go index 9ee9d28a8..aa425cdd1 100644 --- a/pkg/scheduler/apis/config/v1beta2/conversion_plugin.go +++ b/pkg/scheduler/apis/config/v1beta3/conversion_plugin.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package v1beta3 import ( "k8s.io/apimachinery/pkg/conversion" @@ -22,8 +22,8 @@ import ( "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" ) -func Convert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(in *LoadAwareSchedulingArgs, out *config.LoadAwareSchedulingArgs, s conversion.Scope) error { - if err := autoConvert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(in, out, s); err != nil { +func Convert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(in *LoadAwareSchedulingArgs, out *config.LoadAwareSchedulingArgs, s conversion.Scope) error { + if err := autoConvert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(in, out, s); err != nil { return err } diff --git a/pkg/scheduler/apis/config/v1beta2/defaults.go b/pkg/scheduler/apis/config/v1beta3/defaults.go similarity index 96% rename from pkg/scheduler/apis/config/v1beta2/defaults.go rename to pkg/scheduler/apis/config/v1beta3/defaults.go index 357beb462..fbf0d246a 100644 --- a/pkg/scheduler/apis/config/v1beta2/defaults.go +++ b/pkg/scheduler/apis/config/v1beta3/defaults.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta2 +package v1beta3 import ( "math" @@ -23,7 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedconfigv1beta2 "k8s.io/kube-scheduler/config/v1beta2" + schedconfigv1beta3 "k8s.io/kube-scheduler/config/v1beta3" "k8s.io/utils/pointer" "github.com/koordinator-sh/koordinator/apis/extension" @@ -107,7 +107,7 @@ func SetDefaults_NodeNUMAResourceArgs(obj *NodeNUMAResourceArgs) { if obj.ScoringStrategy == nil { obj.ScoringStrategy = &ScoringStrategy{ Type: LeastAllocated, - Resources: []schedconfigv1beta2.ResourceSpec{ + Resources: []schedconfigv1beta3.ResourceSpec{ { Name: string(corev1.ResourceCPU), Weight: 1, @@ -122,7 +122,7 @@ func SetDefaults_NodeNUMAResourceArgs(obj *NodeNUMAResourceArgs) { if obj.NUMAScoringStrategy == nil { obj.NUMAScoringStrategy = &ScoringStrategy{ Type: LeastAllocated, - Resources: []schedconfigv1beta2.ResourceSpec{ + Resources: []schedconfigv1beta3.ResourceSpec{ { Name: string(corev1.ResourceCPU), Weight: 1, @@ -189,7 +189,7 @@ func SetDefaults_DeviceShareArgs(obj *DeviceShareArgs) { obj.ScoringStrategy = &ScoringStrategy{ // By default, LeastAllocate is used to ensure high availability of applications Type: LeastAllocated, - Resources: []schedconfigv1beta2.ResourceSpec{ + Resources: []schedconfigv1beta3.ResourceSpec{ { Name: string(extension.ResourceGPUMemoryRatio), Weight: 1, diff --git a/pkg/scheduler/apis/config/v1beta2/doc.go b/pkg/scheduler/apis/config/v1beta3/doc.go similarity index 95% rename from pkg/scheduler/apis/config/v1beta2/doc.go rename to pkg/scheduler/apis/config/v1beta3/doc.go index 8fd841880..6fcec80b7 100644 --- a/pkg/scheduler/apis/config/v1beta2/doc.go +++ b/pkg/scheduler/apis/config/v1beta3/doc.go @@ -20,5 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=. // +groupName=kubescheduler.config.k8s.io -// Package v1beta2 -package v1beta2 +// Package v1beta3 +package v1beta3 diff --git a/pkg/scheduler/apis/config/v1beta2/register.go b/pkg/scheduler/apis/config/v1beta3/register.go similarity index 90% rename from pkg/scheduler/apis/config/v1beta2/register.go rename to pkg/scheduler/apis/config/v1beta3/register.go index fe3342c65..54ebd3936 100644 --- a/pkg/scheduler/apis/config/v1beta2/register.go +++ b/pkg/scheduler/apis/config/v1beta3/register.go @@ -14,19 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta2 +package v1beta3 import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - schedschemev1beta2 "k8s.io/kube-scheduler/config/v1beta2" + schedschemev1beta3 "k8s.io/kube-scheduler/config/v1beta3" ) // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: schedschemev1beta2.GroupName, Version: "v1beta2"} +var SchemeGroupVersion = schema.GroupVersion{Group: schedschemev1beta3.GroupName, Version: "v1beta3"} var ( - localSchemeBuilder = &schedschemev1beta2.SchemeBuilder + localSchemeBuilder = &schedschemev1beta3.SchemeBuilder // AddToScheme is a global function that registers this API group & version to a scheme AddToScheme = localSchemeBuilder.AddToScheme ) diff --git a/pkg/scheduler/apis/config/v1beta2/types.go b/pkg/scheduler/apis/config/v1beta3/types.go similarity index 98% rename from pkg/scheduler/apis/config/v1beta2/types.go rename to pkg/scheduler/apis/config/v1beta3/types.go index f529fb540..12c35924c 100644 --- a/pkg/scheduler/apis/config/v1beta2/types.go +++ b/pkg/scheduler/apis/config/v1beta3/types.go @@ -14,12 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta2 +package v1beta3 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedconfigv1beta2 "k8s.io/kube-scheduler/config/v1beta2" + schedconfigv1beta3 "k8s.io/kube-scheduler/config/v1beta3" "github.com/koordinator-sh/koordinator/apis/extension" ) @@ -89,7 +89,7 @@ type ScoringStrategy struct { // Resources a list of pairs to be considered while scoring // allowed weights start from 1. - Resources []schedconfigv1beta2.ResourceSpec `json:"resources,omitempty"` + Resources []schedconfigv1beta3.ResourceSpec `json:"resources,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go b/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go similarity index 76% rename from pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go rename to pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go index e96fd50b5..49cd0f181 100644 --- a/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go +++ b/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by conversion-gen. DO NOT EDIT. -package v1beta2 +package v1beta3 import ( unsafe "unsafe" @@ -30,7 +30,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - configv1beta2 "k8s.io/kube-scheduler/config/v1beta2" + configv1beta3 "k8s.io/kube-scheduler/config/v1beta3" apisconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" ) @@ -42,89 +42,89 @@ func init() { // Public to allow building arbitrary schemes. 
func RegisterConversions(s *runtime.Scheme) error { if err := s.AddGeneratedConversionFunc((*CoschedulingArgs)(nil), (*config.CoschedulingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(a.(*CoschedulingArgs), b.(*config.CoschedulingArgs), scope) + return Convert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(a.(*CoschedulingArgs), b.(*config.CoschedulingArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*config.CoschedulingArgs)(nil), (*CoschedulingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs(a.(*config.CoschedulingArgs), b.(*CoschedulingArgs), scope) + return Convert_config_CoschedulingArgs_To_v1beta3_CoschedulingArgs(a.(*config.CoschedulingArgs), b.(*CoschedulingArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*DeviceShareArgs)(nil), (*config.DeviceShareArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DeviceShareArgs_To_config_DeviceShareArgs(a.(*DeviceShareArgs), b.(*config.DeviceShareArgs), scope) + return Convert_v1beta3_DeviceShareArgs_To_config_DeviceShareArgs(a.(*DeviceShareArgs), b.(*config.DeviceShareArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*config.DeviceShareArgs)(nil), (*DeviceShareArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_DeviceShareArgs_To_v1beta2_DeviceShareArgs(a.(*config.DeviceShareArgs), b.(*DeviceShareArgs), scope) + return Convert_config_DeviceShareArgs_To_v1beta3_DeviceShareArgs(a.(*config.DeviceShareArgs), b.(*DeviceShareArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*ElasticQuotaArgs)(nil), (*config.ElasticQuotaArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ElasticQuotaArgs_To_config_ElasticQuotaArgs(a.(*ElasticQuotaArgs), b.(*config.ElasticQuotaArgs), scope) + return Convert_v1beta3_ElasticQuotaArgs_To_config_ElasticQuotaArgs(a.(*ElasticQuotaArgs), b.(*config.ElasticQuotaArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*config.ElasticQuotaArgs)(nil), (*ElasticQuotaArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ElasticQuotaArgs_To_v1beta2_ElasticQuotaArgs(a.(*config.ElasticQuotaArgs), b.(*ElasticQuotaArgs), scope) + return Convert_config_ElasticQuotaArgs_To_v1beta3_ElasticQuotaArgs(a.(*config.ElasticQuotaArgs), b.(*ElasticQuotaArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*LoadAwareSchedulingAggregatedArgs)(nil), (*config.LoadAwareSchedulingAggregatedArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs(a.(*LoadAwareSchedulingAggregatedArgs), b.(*config.LoadAwareSchedulingAggregatedArgs), scope) + return Convert_v1beta3_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs(a.(*LoadAwareSchedulingAggregatedArgs), b.(*config.LoadAwareSchedulingAggregatedArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*config.LoadAwareSchedulingAggregatedArgs)(nil), (*LoadAwareSchedulingAggregatedArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta2_LoadAwareSchedulingAggregatedArgs(a.(*config.LoadAwareSchedulingAggregatedArgs), b.(*LoadAwareSchedulingAggregatedArgs), scope) + return Convert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta3_LoadAwareSchedulingAggregatedArgs(a.(*config.LoadAwareSchedulingAggregatedArgs), b.(*LoadAwareSchedulingAggregatedArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*config.LoadAwareSchedulingArgs)(nil), (*LoadAwareSchedulingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_LoadAwareSchedulingArgs_To_v1beta2_LoadAwareSchedulingArgs(a.(*config.LoadAwareSchedulingArgs), b.(*LoadAwareSchedulingArgs), scope) + return Convert_config_LoadAwareSchedulingArgs_To_v1beta3_LoadAwareSchedulingArgs(a.(*config.LoadAwareSchedulingArgs), b.(*LoadAwareSchedulingArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*NodeNUMAResourceArgs)(nil), (*config.NodeNUMAResourceArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(a.(*NodeNUMAResourceArgs), b.(*config.NodeNUMAResourceArgs), scope) + return Convert_v1beta3_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(a.(*NodeNUMAResourceArgs), b.(*config.NodeNUMAResourceArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*config.NodeNUMAResourceArgs)(nil), (*NodeNUMAResourceArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeNUMAResourceArgs_To_v1beta2_NodeNUMAResourceArgs(a.(*config.NodeNUMAResourceArgs), b.(*NodeNUMAResourceArgs), scope) + return Convert_config_NodeNUMAResourceArgs_To_v1beta3_NodeNUMAResourceArgs(a.(*config.NodeNUMAResourceArgs), b.(*NodeNUMAResourceArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*ReservationArgs)(nil), (*config.ReservationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ReservationArgs_To_config_ReservationArgs(a.(*ReservationArgs), b.(*config.ReservationArgs), scope) + return Convert_v1beta3_ReservationArgs_To_config_ReservationArgs(a.(*ReservationArgs), b.(*config.ReservationArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*config.ReservationArgs)(nil), (*ReservationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ReservationArgs_To_v1beta2_ReservationArgs(a.(*config.ReservationArgs), b.(*ReservationArgs), scope) + return Convert_config_ReservationArgs_To_v1beta3_ReservationArgs(a.(*config.ReservationArgs), b.(*ReservationArgs), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*ScoringStrategy)(nil), (*config.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(a.(*ScoringStrategy), b.(*config.ScoringStrategy), scope) + return Convert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(a.(*ScoringStrategy), b.(*config.ScoringStrategy), scope) }); err != nil { return err } if err := s.AddGeneratedConversionFunc((*config.ScoringStrategy)(nil), (*ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(a.(*config.ScoringStrategy), b.(*ScoringStrategy), scope) + return Convert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(a.(*config.ScoringStrategy), 
b.(*ScoringStrategy), scope) }); err != nil { return err } if err := s.AddConversionFunc((*LoadAwareSchedulingArgs)(nil), (*config.LoadAwareSchedulingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(a.(*LoadAwareSchedulingArgs), b.(*config.LoadAwareSchedulingArgs), scope) + return Convert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(a.(*LoadAwareSchedulingArgs), b.(*config.LoadAwareSchedulingArgs), scope) }); err != nil { return err } return nil } -func autoConvert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(in *CoschedulingArgs, out *config.CoschedulingArgs, s conversion.Scope) error { +func autoConvert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(in *CoschedulingArgs, out *config.CoschedulingArgs, s conversion.Scope) error { if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.DefaultTimeout, &out.DefaultTimeout, s); err != nil { return err } @@ -137,12 +137,12 @@ func autoConvert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(in *Cosched return nil } -// Convert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs is an autogenerated conversion function. -func Convert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(in *CoschedulingArgs, out *config.CoschedulingArgs, s conversion.Scope) error { - return autoConvert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(in, out, s) +// Convert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs is an autogenerated conversion function. +func Convert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(in *CoschedulingArgs, out *config.CoschedulingArgs, s conversion.Scope) error { + return autoConvert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(in, out, s) } -func autoConvert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs(in *config.CoschedulingArgs, out *CoschedulingArgs, s conversion.Scope) error { +func autoConvert_config_CoschedulingArgs_To_v1beta3_CoschedulingArgs(in *config.CoschedulingArgs, out *CoschedulingArgs, s conversion.Scope) error { if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.DefaultTimeout, &out.DefaultTimeout, s); err != nil { return err } @@ -155,34 +155,34 @@ func autoConvert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs(in *config. return nil } -// Convert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs is an autogenerated conversion function. -func Convert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs(in *config.CoschedulingArgs, out *CoschedulingArgs, s conversion.Scope) error { - return autoConvert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs(in, out, s) +// Convert_config_CoschedulingArgs_To_v1beta3_CoschedulingArgs is an autogenerated conversion function. +func Convert_config_CoschedulingArgs_To_v1beta3_CoschedulingArgs(in *config.CoschedulingArgs, out *CoschedulingArgs, s conversion.Scope) error { + return autoConvert_config_CoschedulingArgs_To_v1beta3_CoschedulingArgs(in, out, s) } -func autoConvert_v1beta2_DeviceShareArgs_To_config_DeviceShareArgs(in *DeviceShareArgs, out *config.DeviceShareArgs, s conversion.Scope) error { +func autoConvert_v1beta3_DeviceShareArgs_To_config_DeviceShareArgs(in *DeviceShareArgs, out *config.DeviceShareArgs, s conversion.Scope) error { out.Allocator = in.Allocator out.ScoringStrategy = (*config.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy)) return nil } -// Convert_v1beta2_DeviceShareArgs_To_config_DeviceShareArgs is an autogenerated conversion function. 
-func Convert_v1beta2_DeviceShareArgs_To_config_DeviceShareArgs(in *DeviceShareArgs, out *config.DeviceShareArgs, s conversion.Scope) error { - return autoConvert_v1beta2_DeviceShareArgs_To_config_DeviceShareArgs(in, out, s) +// Convert_v1beta3_DeviceShareArgs_To_config_DeviceShareArgs is an autogenerated conversion function. +func Convert_v1beta3_DeviceShareArgs_To_config_DeviceShareArgs(in *DeviceShareArgs, out *config.DeviceShareArgs, s conversion.Scope) error { + return autoConvert_v1beta3_DeviceShareArgs_To_config_DeviceShareArgs(in, out, s) } -func autoConvert_config_DeviceShareArgs_To_v1beta2_DeviceShareArgs(in *config.DeviceShareArgs, out *DeviceShareArgs, s conversion.Scope) error { +func autoConvert_config_DeviceShareArgs_To_v1beta3_DeviceShareArgs(in *config.DeviceShareArgs, out *DeviceShareArgs, s conversion.Scope) error { out.Allocator = in.Allocator out.ScoringStrategy = (*ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy)) return nil } -// Convert_config_DeviceShareArgs_To_v1beta2_DeviceShareArgs is an autogenerated conversion function. -func Convert_config_DeviceShareArgs_To_v1beta2_DeviceShareArgs(in *config.DeviceShareArgs, out *DeviceShareArgs, s conversion.Scope) error { - return autoConvert_config_DeviceShareArgs_To_v1beta2_DeviceShareArgs(in, out, s) +// Convert_config_DeviceShareArgs_To_v1beta3_DeviceShareArgs is an autogenerated conversion function. +func Convert_config_DeviceShareArgs_To_v1beta3_DeviceShareArgs(in *config.DeviceShareArgs, out *DeviceShareArgs, s conversion.Scope) error { + return autoConvert_config_DeviceShareArgs_To_v1beta3_DeviceShareArgs(in, out, s) } -func autoConvert_v1beta2_ElasticQuotaArgs_To_config_ElasticQuotaArgs(in *ElasticQuotaArgs, out *config.ElasticQuotaArgs, s conversion.Scope) error { +func autoConvert_v1beta3_ElasticQuotaArgs_To_config_ElasticQuotaArgs(in *ElasticQuotaArgs, out *config.ElasticQuotaArgs, s conversion.Scope) error { if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.DelayEvictTime, &out.DelayEvictTime, s); err != nil { return err } @@ -204,12 +204,12 @@ func autoConvert_v1beta2_ElasticQuotaArgs_To_config_ElasticQuotaArgs(in *Elastic return nil } -// Convert_v1beta2_ElasticQuotaArgs_To_config_ElasticQuotaArgs is an autogenerated conversion function. -func Convert_v1beta2_ElasticQuotaArgs_To_config_ElasticQuotaArgs(in *ElasticQuotaArgs, out *config.ElasticQuotaArgs, s conversion.Scope) error { - return autoConvert_v1beta2_ElasticQuotaArgs_To_config_ElasticQuotaArgs(in, out, s) +// Convert_v1beta3_ElasticQuotaArgs_To_config_ElasticQuotaArgs is an autogenerated conversion function. +func Convert_v1beta3_ElasticQuotaArgs_To_config_ElasticQuotaArgs(in *ElasticQuotaArgs, out *config.ElasticQuotaArgs, s conversion.Scope) error { + return autoConvert_v1beta3_ElasticQuotaArgs_To_config_ElasticQuotaArgs(in, out, s) } -func autoConvert_config_ElasticQuotaArgs_To_v1beta2_ElasticQuotaArgs(in *config.ElasticQuotaArgs, out *ElasticQuotaArgs, s conversion.Scope) error { +func autoConvert_config_ElasticQuotaArgs_To_v1beta3_ElasticQuotaArgs(in *config.ElasticQuotaArgs, out *ElasticQuotaArgs, s conversion.Scope) error { if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.DelayEvictTime, &out.DelayEvictTime, s); err != nil { return err } @@ -231,12 +231,12 @@ func autoConvert_config_ElasticQuotaArgs_To_v1beta2_ElasticQuotaArgs(in *config. return nil } -// Convert_config_ElasticQuotaArgs_To_v1beta2_ElasticQuotaArgs is an autogenerated conversion function. 
-func Convert_config_ElasticQuotaArgs_To_v1beta2_ElasticQuotaArgs(in *config.ElasticQuotaArgs, out *ElasticQuotaArgs, s conversion.Scope) error { - return autoConvert_config_ElasticQuotaArgs_To_v1beta2_ElasticQuotaArgs(in, out, s) +// Convert_config_ElasticQuotaArgs_To_v1beta3_ElasticQuotaArgs is an autogenerated conversion function. +func Convert_config_ElasticQuotaArgs_To_v1beta3_ElasticQuotaArgs(in *config.ElasticQuotaArgs, out *ElasticQuotaArgs, s conversion.Scope) error { + return autoConvert_config_ElasticQuotaArgs_To_v1beta3_ElasticQuotaArgs(in, out, s) } -func autoConvert_v1beta2_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs(in *LoadAwareSchedulingAggregatedArgs, out *config.LoadAwareSchedulingAggregatedArgs, s conversion.Scope) error { +func autoConvert_v1beta3_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs(in *LoadAwareSchedulingAggregatedArgs, out *config.LoadAwareSchedulingAggregatedArgs, s conversion.Scope) error { out.UsageThresholds = *(*map[corev1.ResourceName]int64)(unsafe.Pointer(&in.UsageThresholds)) out.UsageAggregationType = extension.AggregationType(in.UsageAggregationType) if err := v1.Convert_Pointer_v1_Duration_To_v1_Duration(&in.UsageAggregatedDuration, &out.UsageAggregatedDuration, s); err != nil { @@ -249,12 +249,12 @@ func autoConvert_v1beta2_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSc return nil } -// Convert_v1beta2_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs is an autogenerated conversion function. -func Convert_v1beta2_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs(in *LoadAwareSchedulingAggregatedArgs, out *config.LoadAwareSchedulingAggregatedArgs, s conversion.Scope) error { - return autoConvert_v1beta2_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs(in, out, s) +// Convert_v1beta3_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs is an autogenerated conversion function. +func Convert_v1beta3_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs(in *LoadAwareSchedulingAggregatedArgs, out *config.LoadAwareSchedulingAggregatedArgs, s conversion.Scope) error { + return autoConvert_v1beta3_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs(in, out, s) } -func autoConvert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta2_LoadAwareSchedulingAggregatedArgs(in *config.LoadAwareSchedulingAggregatedArgs, out *LoadAwareSchedulingAggregatedArgs, s conversion.Scope) error { +func autoConvert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta3_LoadAwareSchedulingAggregatedArgs(in *config.LoadAwareSchedulingAggregatedArgs, out *LoadAwareSchedulingAggregatedArgs, s conversion.Scope) error { out.UsageThresholds = *(*map[corev1.ResourceName]int64)(unsafe.Pointer(&in.UsageThresholds)) out.UsageAggregationType = extension.AggregationType(in.UsageAggregationType) if err := v1.Convert_v1_Duration_To_Pointer_v1_Duration(&in.UsageAggregatedDuration, &out.UsageAggregatedDuration, s); err != nil { @@ -267,12 +267,12 @@ func autoConvert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta2_LoadAwareSc return nil } -// Convert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta2_LoadAwareSchedulingAggregatedArgs is an autogenerated conversion function. 
-func Convert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta2_LoadAwareSchedulingAggregatedArgs(in *config.LoadAwareSchedulingAggregatedArgs, out *LoadAwareSchedulingAggregatedArgs, s conversion.Scope) error { - return autoConvert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta2_LoadAwareSchedulingAggregatedArgs(in, out, s) +// Convert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta3_LoadAwareSchedulingAggregatedArgs is an autogenerated conversion function. +func Convert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta3_LoadAwareSchedulingAggregatedArgs(in *config.LoadAwareSchedulingAggregatedArgs, out *LoadAwareSchedulingAggregatedArgs, s conversion.Scope) error { + return autoConvert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta3_LoadAwareSchedulingAggregatedArgs(in, out, s) } -func autoConvert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(in *LoadAwareSchedulingArgs, out *config.LoadAwareSchedulingArgs, s conversion.Scope) error { +func autoConvert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(in *LoadAwareSchedulingArgs, out *config.LoadAwareSchedulingArgs, s conversion.Scope) error { out.FilterExpiredNodeMetrics = (*bool)(unsafe.Pointer(in.FilterExpiredNodeMetrics)) out.NodeMetricExpirationSeconds = (*int64)(unsafe.Pointer(in.NodeMetricExpirationSeconds)) out.ResourceWeights = *(*map[corev1.ResourceName]int64)(unsafe.Pointer(&in.ResourceWeights)) @@ -286,7 +286,7 @@ func autoConvert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingAr if in.Aggregated != nil { in, out := &in.Aggregated, &out.Aggregated *out = new(config.LoadAwareSchedulingAggregatedArgs) - if err := Convert_v1beta2_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs(*in, *out, s); err != nil { + if err := Convert_v1beta3_LoadAwareSchedulingAggregatedArgs_To_config_LoadAwareSchedulingAggregatedArgs(*in, *out, s); err != nil { return err } } else { @@ -295,7 +295,7 @@ func autoConvert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingAr return nil } -func autoConvert_config_LoadAwareSchedulingArgs_To_v1beta2_LoadAwareSchedulingArgs(in *config.LoadAwareSchedulingArgs, out *LoadAwareSchedulingArgs, s conversion.Scope) error { +func autoConvert_config_LoadAwareSchedulingArgs_To_v1beta3_LoadAwareSchedulingArgs(in *config.LoadAwareSchedulingArgs, out *LoadAwareSchedulingArgs, s conversion.Scope) error { out.FilterExpiredNodeMetrics = (*bool)(unsafe.Pointer(in.FilterExpiredNodeMetrics)) out.NodeMetricExpirationSeconds = (*int64)(unsafe.Pointer(in.NodeMetricExpirationSeconds)) out.ResourceWeights = *(*map[corev1.ResourceName]int64)(unsafe.Pointer(&in.ResourceWeights)) @@ -309,7 +309,7 @@ func autoConvert_config_LoadAwareSchedulingArgs_To_v1beta2_LoadAwareSchedulingAr if in.Aggregated != nil { in, out := &in.Aggregated, &out.Aggregated *out = new(LoadAwareSchedulingAggregatedArgs) - if err := Convert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta2_LoadAwareSchedulingAggregatedArgs(*in, *out, s); err != nil { + if err := Convert_config_LoadAwareSchedulingAggregatedArgs_To_v1beta3_LoadAwareSchedulingAggregatedArgs(*in, *out, s); err != nil { return err } } else { @@ -318,12 +318,12 @@ func autoConvert_config_LoadAwareSchedulingArgs_To_v1beta2_LoadAwareSchedulingAr return nil } -// Convert_config_LoadAwareSchedulingArgs_To_v1beta2_LoadAwareSchedulingArgs is an autogenerated conversion function. 
-func Convert_config_LoadAwareSchedulingArgs_To_v1beta2_LoadAwareSchedulingArgs(in *config.LoadAwareSchedulingArgs, out *LoadAwareSchedulingArgs, s conversion.Scope) error { - return autoConvert_config_LoadAwareSchedulingArgs_To_v1beta2_LoadAwareSchedulingArgs(in, out, s) +// Convert_config_LoadAwareSchedulingArgs_To_v1beta3_LoadAwareSchedulingArgs is an autogenerated conversion function. +func Convert_config_LoadAwareSchedulingArgs_To_v1beta3_LoadAwareSchedulingArgs(in *config.LoadAwareSchedulingArgs, out *LoadAwareSchedulingArgs, s conversion.Scope) error { + return autoConvert_config_LoadAwareSchedulingArgs_To_v1beta3_LoadAwareSchedulingArgs(in, out, s) } -func autoConvert_v1beta2_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(in *NodeNUMAResourceArgs, out *config.NodeNUMAResourceArgs, s conversion.Scope) error { +func autoConvert_v1beta3_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(in *NodeNUMAResourceArgs, out *config.NodeNUMAResourceArgs, s conversion.Scope) error { if err := v1.Convert_Pointer_string_To_string(&in.DefaultCPUBindPolicy, &out.DefaultCPUBindPolicy, s); err != nil { return err } @@ -332,12 +332,12 @@ func autoConvert_v1beta2_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(in return nil } -// Convert_v1beta2_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs is an autogenerated conversion function. -func Convert_v1beta2_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(in *NodeNUMAResourceArgs, out *config.NodeNUMAResourceArgs, s conversion.Scope) error { - return autoConvert_v1beta2_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(in, out, s) +// Convert_v1beta3_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs is an autogenerated conversion function. +func Convert_v1beta3_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(in *NodeNUMAResourceArgs, out *config.NodeNUMAResourceArgs, s conversion.Scope) error { + return autoConvert_v1beta3_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(in, out, s) } -func autoConvert_config_NodeNUMAResourceArgs_To_v1beta2_NodeNUMAResourceArgs(in *config.NodeNUMAResourceArgs, out *NodeNUMAResourceArgs, s conversion.Scope) error { +func autoConvert_config_NodeNUMAResourceArgs_To_v1beta3_NodeNUMAResourceArgs(in *config.NodeNUMAResourceArgs, out *NodeNUMAResourceArgs, s conversion.Scope) error { if err := v1.Convert_string_To_Pointer_string(&in.DefaultCPUBindPolicy, &out.DefaultCPUBindPolicy, s); err != nil { return err } @@ -346,53 +346,53 @@ func autoConvert_config_NodeNUMAResourceArgs_To_v1beta2_NodeNUMAResourceArgs(in return nil } -// Convert_config_NodeNUMAResourceArgs_To_v1beta2_NodeNUMAResourceArgs is an autogenerated conversion function. -func Convert_config_NodeNUMAResourceArgs_To_v1beta2_NodeNUMAResourceArgs(in *config.NodeNUMAResourceArgs, out *NodeNUMAResourceArgs, s conversion.Scope) error { - return autoConvert_config_NodeNUMAResourceArgs_To_v1beta2_NodeNUMAResourceArgs(in, out, s) +// Convert_config_NodeNUMAResourceArgs_To_v1beta3_NodeNUMAResourceArgs is an autogenerated conversion function. 
+func Convert_config_NodeNUMAResourceArgs_To_v1beta3_NodeNUMAResourceArgs(in *config.NodeNUMAResourceArgs, out *NodeNUMAResourceArgs, s conversion.Scope) error { + return autoConvert_config_NodeNUMAResourceArgs_To_v1beta3_NodeNUMAResourceArgs(in, out, s) } -func autoConvert_v1beta2_ReservationArgs_To_config_ReservationArgs(in *ReservationArgs, out *config.ReservationArgs, s conversion.Scope) error { +func autoConvert_v1beta3_ReservationArgs_To_config_ReservationArgs(in *ReservationArgs, out *config.ReservationArgs, s conversion.Scope) error { if err := v1.Convert_Pointer_bool_To_bool(&in.EnablePreemption, &out.EnablePreemption, s); err != nil { return err } return nil } -// Convert_v1beta2_ReservationArgs_To_config_ReservationArgs is an autogenerated conversion function. -func Convert_v1beta2_ReservationArgs_To_config_ReservationArgs(in *ReservationArgs, out *config.ReservationArgs, s conversion.Scope) error { - return autoConvert_v1beta2_ReservationArgs_To_config_ReservationArgs(in, out, s) +// Convert_v1beta3_ReservationArgs_To_config_ReservationArgs is an autogenerated conversion function. +func Convert_v1beta3_ReservationArgs_To_config_ReservationArgs(in *ReservationArgs, out *config.ReservationArgs, s conversion.Scope) error { + return autoConvert_v1beta3_ReservationArgs_To_config_ReservationArgs(in, out, s) } -func autoConvert_config_ReservationArgs_To_v1beta2_ReservationArgs(in *config.ReservationArgs, out *ReservationArgs, s conversion.Scope) error { +func autoConvert_config_ReservationArgs_To_v1beta3_ReservationArgs(in *config.ReservationArgs, out *ReservationArgs, s conversion.Scope) error { if err := v1.Convert_bool_To_Pointer_bool(&in.EnablePreemption, &out.EnablePreemption, s); err != nil { return err } return nil } -// Convert_config_ReservationArgs_To_v1beta2_ReservationArgs is an autogenerated conversion function. -func Convert_config_ReservationArgs_To_v1beta2_ReservationArgs(in *config.ReservationArgs, out *ReservationArgs, s conversion.Scope) error { - return autoConvert_config_ReservationArgs_To_v1beta2_ReservationArgs(in, out, s) +// Convert_config_ReservationArgs_To_v1beta3_ReservationArgs is an autogenerated conversion function. +func Convert_config_ReservationArgs_To_v1beta3_ReservationArgs(in *config.ReservationArgs, out *ReservationArgs, s conversion.Scope) error { + return autoConvert_config_ReservationArgs_To_v1beta3_ReservationArgs(in, out, s) } -func autoConvert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in *ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { +func autoConvert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(in *ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { out.Type = config.ScoringStrategyType(in.Type) out.Resources = *(*[]apisconfig.ResourceSpec)(unsafe.Pointer(&in.Resources)) return nil } -// Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy is an autogenerated conversion function. -func Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in *ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { - return autoConvert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in, out, s) +// Convert_v1beta3_ScoringStrategy_To_config_ScoringStrategy is an autogenerated conversion function. 
+func Convert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(in *ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { + return autoConvert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(in, out, s) } -func autoConvert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in *config.ScoringStrategy, out *ScoringStrategy, s conversion.Scope) error { +func autoConvert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(in *config.ScoringStrategy, out *ScoringStrategy, s conversion.Scope) error { out.Type = ScoringStrategyType(in.Type) - out.Resources = *(*[]configv1beta2.ResourceSpec)(unsafe.Pointer(&in.Resources)) + out.Resources = *(*[]configv1beta3.ResourceSpec)(unsafe.Pointer(&in.Resources)) return nil } -// Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy is an autogenerated conversion function. -func Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in *config.ScoringStrategy, out *ScoringStrategy, s conversion.Scope) error { - return autoConvert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in, out, s) +// Convert_config_ScoringStrategy_To_v1beta3_ScoringStrategy is an autogenerated conversion function. +func Convert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(in *config.ScoringStrategy, out *ScoringStrategy, s conversion.Scope) error { + return autoConvert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(in, out, s) } diff --git a/pkg/scheduler/apis/config/v1beta2/zz_generated.deepcopy.go b/pkg/scheduler/apis/config/v1beta3/zz_generated.deepcopy.go similarity index 98% rename from pkg/scheduler/apis/config/v1beta2/zz_generated.deepcopy.go rename to pkg/scheduler/apis/config/v1beta3/zz_generated.deepcopy.go index e3e10ac84..78abb035c 100644 --- a/pkg/scheduler/apis/config/v1beta2/zz_generated.deepcopy.go +++ b/pkg/scheduler/apis/config/v1beta3/zz_generated.deepcopy.go @@ -19,13 +19,13 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1beta2 +package v1beta3 import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - configv1beta2 "k8s.io/kube-scheduler/config/v1beta2" + configv1beta3 "k8s.io/kube-scheduler/config/v1beta3" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -343,7 +343,7 @@ func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) { *out = *in if in.Resources != nil { in, out := &in.Resources, &out.Resources - *out = make([]configv1beta2.ResourceSpec, len(*in)) + *out = make([]configv1beta3.ResourceSpec, len(*in)) copy(*out, *in) } return diff --git a/pkg/scheduler/apis/config/v1beta2/zz_generated.defaults.go b/pkg/scheduler/apis/config/v1beta3/zz_generated.defaults.go similarity index 99% rename from pkg/scheduler/apis/config/v1beta2/zz_generated.defaults.go rename to pkg/scheduler/apis/config/v1beta3/zz_generated.defaults.go index d7aef16de..0d99373e6 100644 --- a/pkg/scheduler/apis/config/v1beta2/zz_generated.defaults.go +++ b/pkg/scheduler/apis/config/v1beta3/zz_generated.defaults.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by defaulter-gen. DO NOT EDIT. 
-package v1beta2
+package v1beta3
 
 import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
diff --git a/pkg/scheduler/frameworkext/debug.go b/pkg/scheduler/frameworkext/debug.go
index 392e1b35c..e2547529c 100644
--- a/pkg/scheduler/frameworkext/debug.go
+++ b/pkg/scheduler/frameworkext/debug.go
@@ -58,27 +58,20 @@ func DebugFiltersSetter(val string) (string, error) {
 	return fmt.Sprintf("successfully set debugFilterFailure to %s", val), nil
 }
 
-func debugScores(topN int, pod *corev1.Pod, pluginToNodeScores map[string]framework.NodeScoreList, nodes []*corev1.Node) prettytable.Writer {
-	// Summarize all scores.
-	result := make(framework.NodeScoreList, 0, len(nodes))
-	nodeOrders := make(map[string]int, len(nodes))
-
-	for i, node := range nodes {
-		nodeOrders[node.Name] = i
-		result = append(result, framework.NodeScore{Name: node.Name, Score: 0})
-		for j := range pluginToNodeScores {
-			result[i].Score += pluginToNodeScores[j][i].Score
-		}
+func debugScores(topN int, pod *corev1.Pod, allNodePluginScores []framework.NodePluginScores, nodes []*corev1.Node) prettytable.Writer {
+	if len(allNodePluginScores) == 0 {
+		return nil
 	}
-	sort.Slice(result, func(i, j int) bool {
-		return result[i].Score > result[j].Score
+	// Summarize all scores.
+	sort.Slice(allNodePluginScores, func(i, j int) bool {
+		return allNodePluginScores[i].TotalScore > allNodePluginScores[j].TotalScore
 	})
 
-	pluginNames := make([]string, 0, len(pluginToNodeScores))
-	for name := range pluginToNodeScores {
-		pluginNames = append(pluginNames, name)
+	pluginNames := make([]string, 0, len(allNodePluginScores))
+	pluginScores := allNodePluginScores[0].Scores
+	for _, v := range pluginScores {
+		pluginNames = append(pluginNames, v.Name)
 	}
-	sort.Strings(pluginNames)
 
 	w := prettytable.NewWriter()
 	headerRow := prettytable.Row{"#", "Pod", "Node", "Score"}
@@ -88,19 +81,13 @@ func debugScores(topN int, pod *corev1.Pod, pluginToNodeScores map[string]framew
 	w.AppendHeader(headerRow)
 
 	podRef := klog.KObj(pod)
-	for i, node := range result {
+	for i, nodeScore := range allNodePluginScores {
 		if i >= topN {
 			break
 		}
-		row := prettytable.Row{strconv.Itoa(i), podRef.String(), node.Name, node.Score}
-		if nodeIndex, ok := nodeOrders[node.Name]; ok {
-			for _, pluginName := range pluginNames {
-				if scores, ok := pluginToNodeScores[pluginName]; ok {
-					row = append(row, scores[nodeIndex].Score)
-				} else {
-					row = append(row, -1)
-				}
-			}
+		row := prettytable.Row{strconv.Itoa(i), podRef.String(), nodeScore.Name, nodeScore.TotalScore}
+		for _, pluginScore := range nodeScore.Scores {
+			row = append(row, pluginScore.Score)
 		}
 		w.AppendRow(row)
 	}
diff --git a/pkg/scheduler/frameworkext/debug_test.go b/pkg/scheduler/frameworkext/debug_test.go
index c5bd20c28..f5cbbd534 100644
--- a/pkg/scheduler/frameworkext/debug_test.go
+++ b/pkg/scheduler/frameworkext/debug_test.go
@@ -17,6 +17,7 @@ limitations under the License.
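For reference, the debugScores rework above tracks the upstream scheduler framework's switch from the per-plugin map (framework.PluginToNodeScores) to a per-node slice ([]framework.NodePluginScores), where every entry already carries its plugin breakdown and a precomputed TotalScore. A minimal sketch of ranking nodes with the new type; the topNodes helper and the sample values are illustrative only and not part of this patch:

	package main

	import (
		"fmt"
		"sort"

		"k8s.io/kubernetes/pkg/scheduler/framework"
	)

	// topNodes returns the n highest-scoring entries, ordered by descending TotalScore.
	func topNodes(scores []framework.NodePluginScores, n int) []framework.NodePluginScores {
		sort.Slice(scores, func(i, j int) bool {
			return scores[i].TotalScore > scores[j].TotalScore
		})
		if n > len(scores) {
			n = len(scores)
		}
		return scores[:n]
	}

	func main() {
		scores := []framework.NodePluginScores{
			{Name: "node-a", Scores: []framework.PluginScore{{Name: "NodeResourcesFit", Score: 90}}, TotalScore: 90},
			{Name: "node-b", Scores: []framework.PluginScore{{Name: "NodeResourcesFit", Score: 70}}, TotalScore: 70},
		}
		fmt.Println(topNodes(scores, 1)[0].Name) // prints "node-a"
	}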
package frameworkext import ( + "sort" "testing" "github.com/stretchr/testify/assert" @@ -165,7 +166,32 @@ func TestDebugScores(t *testing.T) { }, } - w := debugScores(4, pod, pluginToNodeScores, nodes) + m := map[string][]framework.PluginScore{} + for pluginName, nodeScores := range pluginToNodeScores { + for _, v := range nodeScores { + m[v.Name] = append(m[v.Name], framework.PluginScore{ + Name: pluginName, + Score: v.Score, + }) + } + } + allNodePluginScores := make([]framework.NodePluginScores, 0, len(m)) + for nodeName, pluginScores := range m { + sort.Slice(pluginScores, func(i, j int) bool { + return pluginScores[i].Name < pluginScores[j].Name + }) + var totalScore int64 + for _, v := range pluginScores { + totalScore += v.Score + } + allNodePluginScores = append(allNodePluginScores, framework.NodePluginScores{ + Name: nodeName, + Scores: pluginScores, + TotalScore: totalScore, + }) + } + + w := debugScores(4, pod, allNodePluginScores, nodes) expectedResult := `| # | Pod | Node | Score | ImageLocality | InterPodAffinity | LoadAwareScheduling | NodeAffinity | NodeNUMAResource | NodeResourcesBalancedAllocation | NodeResourcesFit | PodTopologySpread | Reservation | TaintToleration | | --- | --- | --- | ---:| ---:| ---:| ---:| ---:| ---:| ---:| ---:| ---:| ---:| ---:| | 0 | default/curlimage-545745d8f8-rngp7 | cn-hangzhou.10.0.4.51 | 577 | 0 | 0 | 87 | 0 | 0 | 96 | 94 | 200 | 0 | 100 | diff --git a/pkg/scheduler/frameworkext/errorhandler_dispatcher.go b/pkg/scheduler/frameworkext/errorhandler_dispatcher.go index df1d6a0a9..fae39ee0b 100644 --- a/pkg/scheduler/frameworkext/errorhandler_dispatcher.go +++ b/pkg/scheduler/frameworkext/errorhandler_dispatcher.go @@ -16,22 +16,30 @@ limitations under the License. package frameworkext -import "k8s.io/kubernetes/pkg/scheduler/framework" +import ( + "context" + "time" -type PreErrorHandlerFilter func(*framework.QueuedPodInfo, error) bool + "k8s.io/kubernetes/pkg/scheduler" + "k8s.io/kubernetes/pkg/scheduler/framework" +) + +// TODO: We should refactor these function types with scheduler.FailureHandlerFn + +type PreErrorHandlerFilter func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) bool type PostErrorHandlerFilter PreErrorHandlerFilter type errorHandlerDispatcher struct { preHandlerFilters []PreErrorHandlerFilter postHandlerFilters []PostErrorHandlerFilter - defaultHandler func(*framework.QueuedPodInfo, error) + defaultHandler scheduler.FailureHandlerFn } func newErrorHandlerDispatcher() *errorHandlerDispatcher { return &errorHandlerDispatcher{} } -func (d *errorHandlerDispatcher) setDefaultHandler(handler func(*framework.QueuedPodInfo, error)) { +func (d *errorHandlerDispatcher) setDefaultHandler(handler scheduler.FailureHandlerFn) { d.defaultHandler = handler } @@ -44,19 +52,19 @@ func (d *errorHandlerDispatcher) RegisterErrorHandlerFilters(preFilter PreErrorH } } -func (d *errorHandlerDispatcher) Error(podInfo *framework.QueuedPodInfo, err error) { +func (d *errorHandlerDispatcher) Error(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) { defer func() { for _, handlerFilter := range d.postHandlerFilters { - if handlerFilter(podInfo, err) { + if handlerFilter(ctx, fwk, podInfo, status, nominatingInfo, start) { return } } }() for _, handlerFilter := range d.preHandlerFilters { - if handlerFilter(podInfo, err) { + 
if handlerFilter(ctx, fwk, podInfo, status, nominatingInfo, start) { return } } - d.defaultHandler(podInfo, err) + d.defaultHandler(ctx, fwk, podInfo, status, nominatingInfo, start) } diff --git a/pkg/scheduler/frameworkext/errorhandler_dispatcher_test.go b/pkg/scheduler/frameworkext/errorhandler_dispatcher_test.go index 1cdde1cbe..f5535b3b9 100644 --- a/pkg/scheduler/frameworkext/errorhandler_dispatcher_test.go +++ b/pkg/scheduler/frameworkext/errorhandler_dispatcher_test.go @@ -17,7 +17,9 @@ limitations under the License. package frameworkext import ( + "context" "testing" + "time" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" @@ -28,21 +30,21 @@ import ( func TestErrorHandlerDispatcher(t *testing.T) { dispatcher := newErrorHandlerDispatcher() enterDefaultHandler := false - dispatcher.setDefaultHandler(func(info *framework.QueuedPodInfo, err error) { + dispatcher.setDefaultHandler(func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) { enterDefaultHandler = true }) handler1Processed := false afterHandler1Processed := false - dispatcher.RegisterErrorHandlerFilters(func(info *framework.QueuedPodInfo, err error) bool { - if info.Pod.Name == "handler1" { + dispatcher.RegisterErrorHandlerFilters(func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) bool { + if podInfo.Pod.Name == "handler1" { handler1Processed = true return true } return false - }, func(info *framework.QueuedPodInfo, err error) bool { - if info.Pod.Name == "handler1" { + }, func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) bool { + if podInfo.Pod.Name == "handler1" { afterHandler1Processed = true return true } @@ -51,21 +53,21 @@ func TestErrorHandlerDispatcher(t *testing.T) { handler2Processed := false afterHandler2Processed := false - dispatcher.RegisterErrorHandlerFilters(func(info *framework.QueuedPodInfo, err error) bool { - if info.Pod.Name == "handler2" { + dispatcher.RegisterErrorHandlerFilters(func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) bool { + if podInfo.Pod.Name == "handler2" { handler2Processed = true return true } return false - }, func(info *framework.QueuedPodInfo, err error) bool { - if info.Pod.Name == "handler2" { + }, func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) bool { + if podInfo.Pod.Name == "handler2" { afterHandler2Processed = true return true } return false }) - dispatcher.Error(&framework.QueuedPodInfo{ + podInfo := &framework.QueuedPodInfo{ PodInfo: &framework.PodInfo{ Pod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -73,11 +75,12 @@ func TestErrorHandlerDispatcher(t *testing.T) { }, }, }, - }, nil) + } + dispatcher.Error(context.TODO(), nil, podInfo, nil, nil, time.Now()) assert.True(t, enterDefaultHandler) enterDefaultHandler = false - dispatcher.Error(&framework.QueuedPodInfo{ + podInfo = &framework.QueuedPodInfo{ PodInfo: &framework.PodInfo{ Pod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -85,7 +88,8 @@ func TestErrorHandlerDispatcher(t *testing.T) { }, }, }, - }, nil) + } + 
dispatcher.Error(context.TODO(), nil, podInfo, nil, nil, time.Now()) assert.False(t, handler1Processed) assert.False(t, afterHandler1Processed) assert.True(t, handler2Processed) @@ -94,7 +98,7 @@ func TestErrorHandlerDispatcher(t *testing.T) { handler2Processed = false afterHandler2Processed = false - dispatcher.Error(&framework.QueuedPodInfo{ + podInfo = &framework.QueuedPodInfo{ PodInfo: &framework.PodInfo{ Pod: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -102,7 +106,8 @@ func TestErrorHandlerDispatcher(t *testing.T) { }, }, }, - }, nil) + } + dispatcher.Error(context.TODO(), nil, podInfo, nil, nil, time.Now()) assert.True(t, handler1Processed) assert.True(t, afterHandler1Processed) assert.False(t, handler2Processed) diff --git a/pkg/scheduler/frameworkext/eventhandlers/reservation_handler.go b/pkg/scheduler/frameworkext/eventhandlers/reservation_handler.go index 564e3933b..e68935b84 100644 --- a/pkg/scheduler/frameworkext/eventhandlers/reservation_handler.go +++ b/pkg/scheduler/frameworkext/eventhandlers/reservation_handler.go @@ -18,6 +18,7 @@ package eventhandlers import ( "context" + "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -51,7 +52,7 @@ func MakeReservationErrorHandler( ) frameworkext.PreErrorHandlerFilter { reservationLister := koordSharedInformerFactory.Scheduling().V1alpha1().Reservations().Lister() reservationErrorFn := makeReservationErrorFunc(schedAdapter, reservationLister) - return func(podInfo *framework.QueuedPodInfo, schedulingErr error) bool { + return func(ctx context.Context, f framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) bool { pod := podInfo.Pod fwk, ok := sched.Profiles[pod.Spec.SchedulerName] if !ok { @@ -59,6 +60,8 @@ func MakeReservationErrorHandler( return true } + schedulingErr := status.AsError() + // if the pod is not a reserve pod, use the default error handler // If the Pod failed to schedule or no post-filter plugins, should remove exist NominatedReservation of the Pod. if _, ok := schedulingErr.(*framework.FitError); !ok || !fwk.HasPostFilterPlugins() { @@ -75,7 +78,7 @@ func MakeReservationErrorHandler( return false } - reservationErrorFn(podInfo, schedulingErr) + reservationErrorFn(ctx, fwk, podInfo, status, nominatingInfo, start) rName := reservationutil.GetReservationNameFromReservePod(pod) r, err := reservationLister.Get(rName) @@ -91,9 +94,10 @@ func MakeReservationErrorHandler( } } -func makeReservationErrorFunc(sched frameworkext.Scheduler, reservationLister schedulingv1alpha1lister.ReservationLister) func(*framework.QueuedPodInfo, error) { - return func(podInfo *framework.QueuedPodInfo, err error) { +func makeReservationErrorFunc(sched frameworkext.Scheduler, reservationLister schedulingv1alpha1lister.ReservationLister) scheduler.FailureHandlerFn { + return func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) { pod := podInfo.Pod + err := status.AsError() // NOTE: If the pod is a reserve pod, we simply check the corresponding reservation status if the reserve pod // need requeue for the next scheduling cycle. 
 		if err == scheduler.ErrNoNodesAvailable {
@@ -121,8 +125,9 @@ func makeReservationErrorFunc(sched frameworkext.Scheduler, reservationLister sc
 				"pod", klog.KObj(pod), "reservation", rName, "node", nodeName)
 			return
 		}
-		podInfo.PodInfo = framework.NewPodInfo(reservationutil.NewReservePod(cachedR))
-		if err = sched.GetSchedulingQueue().AddUnschedulableIfNotPresent(podInfo, sched.GetSchedulingQueue().SchedulingCycle()); err != nil {
+		podInfo.PodInfo, _ = framework.NewPodInfo(reservationutil.NewReservePod(cachedR))
+		logger := klog.FromContext(ctx)
+		if err = sched.GetSchedulingQueue().AddUnschedulableIfNotPresent(logger, podInfo, sched.GetSchedulingQueue().SchedulingCycle()); err != nil {
 			klog.ErrorS(err, "Error occurred")
 		}
 	}
@@ -278,12 +283,12 @@ func addReservationToSchedulerCache(sched frameworkext.Scheduler, obj interface{
 
 	// update pod cache and trigger pod assigned event for scheduling queue
 	reservePod := reservationutil.NewReservePod(r)
-	if err = sched.GetCache().AddPod(reservePod); err != nil {
+	if err = sched.GetCache().AddPod(klog.Background(), reservePod); err != nil {
 		klog.ErrorS(err, "Failed to add reservation into SchedulerCache", "reservation", klog.KObj(reservePod))
 	} else {
 		klog.V(4).InfoS("Successfully add reservation into SchedulerCache", "reservation", klog.KObj(r))
 	}
-	sched.GetSchedulingQueue().AssignedPodAdded(reservePod)
+	sched.GetSchedulingQueue().AssignedPodAdded(klog.Background(), reservePod)
 }
 
 func updateReservationInSchedulerCache(sched frameworkext.Scheduler, oldObj, newObj interface{}) {
@@ -340,12 +345,12 @@ func updateReservationInSchedulerCache(sched frameworkext.Scheduler, oldObj, new
 	}
 	oldReservePod := reservationutil.NewReservePod(oldR)
 	newReservePod := reservationutil.NewReservePod(newR)
-	if err := sched.GetCache().UpdatePod(oldReservePod, newReservePod); err != nil {
+	if err := sched.GetCache().UpdatePod(klog.Background(), oldReservePod, newReservePod); err != nil {
 		klog.ErrorS(err, "Failed to update reservation into SchedulerCache", "reservation", klog.KObj(newR))
 	} else {
 		klog.V(4).InfoS("Successfully update reservation into SchedulerCache", "reservation", klog.KObj(newR))
 	}
-	sched.GetSchedulingQueue().AssignedPodUpdated(newReservePod)
+	sched.GetSchedulingQueue().AssignedPodUpdated(klog.Background(), oldReservePod, newReservePod)
 }
 
 func deleteReservationFromSchedulerCache(sched frameworkext.Scheduler, obj interface{}) {
@@ -386,18 +391,18 @@ func deleteReservationFromSchedulerCache(sched frameworkext.Scheduler, obj inter
 		util.ResetHostPorts(reservePod, allocatablePorts)
 		// The Pod status in the Cache must be refreshed once to ensure that subsequent deletions are valid.
- if err := sched.GetCache().UpdatePod(reservePod, reservePod); err != nil { + if err := sched.GetCache().UpdatePod(klog.Background(), reservePod, reservePod); err != nil { klog.ErrorS(err, "Failed update reservation into SchedulerCache in delete stage", "reservation", klog.KObj(r)) } } - if err := sched.GetCache().RemovePod(reservePod); err != nil { + if err := sched.GetCache().RemovePod(klog.Background(), reservePod); err != nil { klog.ErrorS(err, "Failed to remove reservation from SchedulerCache", "reservation", klog.KObj(r)) } else { klog.V(4).InfoS("Successfully delete reservation from SchedulerCache", "reservation", klog.KObj(r)) } - sched.GetSchedulingQueue().MoveAllToActiveOrBackoffQueue(frameworkext.AssignedPodDelete, nil) + sched.GetSchedulingQueue().MoveAllToActiveOrBackoffQueue(klog.Background(), frameworkext.AssignedPodDelete, nil, nil, nil) } } @@ -410,7 +415,7 @@ func addReservationToSchedulingQueue(sched frameworkext.Scheduler, obj interface klog.V(3).InfoS("Add event for unscheduled reservation", "reservation", klog.KObj(r)) reservePod := reservationutil.NewReservePod(r) - if err := sched.GetSchedulingQueue().Add(reservePod); err != nil { + if err := sched.GetSchedulingQueue().Add(klog.Background(), reservePod); err != nil { klog.Errorf("failed to add reserve pod into scheduling queue, reservation %v, err: %v", klog.KObj(reservePod), err) } } @@ -438,7 +443,7 @@ func updateReservationInSchedulingQueue(sched frameworkext.Scheduler, oldObj, ne } oldReservePod := reservationutil.NewReservePod(oldR) - if err = sched.GetSchedulingQueue().Update(oldReservePod, newReservePod); err != nil { + if err = sched.GetSchedulingQueue().Update(klog.Background(), oldReservePod, newReservePod); err != nil { klog.Errorf("failed to update reserve pod in scheduling queue, old %s, new %s, err: %v", klog.KObj(oldReservePod), klog.KObj(newReservePod), err) } } diff --git a/pkg/scheduler/frameworkext/eventhandlers/reservation_handler_test.go b/pkg/scheduler/frameworkext/eventhandlers/reservation_handler_test.go index e8b2ad287..b670f09ba 100644 --- a/pkg/scheduler/frameworkext/eventhandlers/reservation_handler_test.go +++ b/pkg/scheduler/frameworkext/eventhandlers/reservation_handler_test.go @@ -31,6 +31,7 @@ import ( "k8s.io/client-go/informers" kubefake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/kubernetes/pkg/scheduler" "k8s.io/kubernetes/pkg/scheduler/framework" @@ -97,7 +98,7 @@ func TestAddReservationErrorHandler(t *testing.T) { fakeRecorder := record.NewFakeRecorder(1024) eventRecorder := record.NewEventRecorderAdapter(fakeRecorder) - fh, err := schedulertesting.NewFramework(registeredPlugins, "koord-scheduler", + fh, err := schedulertesting.NewFramework(context.TODO(), registeredPlugins, "koord-scheduler", frameworkruntime.WithEventRecorder(eventRecorder), frameworkruntime.WithClientSet(kubefake.NewSimpleClientset()), frameworkruntime.WithInformerFactory(informers.NewSharedInformerFactory(kubefake.NewSimpleClientset(), 0)), @@ -117,12 +118,13 @@ func TestAddReservationErrorHandler(t *testing.T) { koordSharedInformerFactory.Start(nil) koordSharedInformerFactory.WaitForCacheSync(nil) + podInfo, _ := framework.NewPodInfo(testPod) queuedPodInfo := &framework.QueuedPodInfo{ - PodInfo: framework.NewPodInfo(testPod), + PodInfo: podInfo, } expectedErr := errors.New(strings.Repeat("test error", validation.NoteLengthLimit)) - handler(queuedPodInfo, expectedErr) + handler(context.TODO(), fh, queuedPodInfo, 
framework.AsStatus(expectedErr), nil, time.Now()) r, err := koordClientSet.SchedulingV1alpha1().Reservations().Get(context.TODO(), testR.Name, metav1.GetOptions{}) assert.NoError(t, err) @@ -667,7 +669,7 @@ func Test_deleteReservationFromCache(t *testing.T) { reservation.SetReservationCache(&fakeReservationCache{}) sched := frameworkext.NewFakeScheduler() if reservationutil.ValidateReservation(tt.obj) == nil { - sched.AddPod(reservationutil.NewReservePod(tt.obj)) + sched.AddPod(klog.Background(), reservationutil.NewReservePod(tt.obj)) } deleteReservationFromSchedulerCache(sched, tt.obj) pod, err := sched.GetPod(&corev1.Pod{ @@ -948,7 +950,7 @@ func Test_deleteReservationFromSchedulingQueue(t *testing.T) { return &fakePermitPlugin{}, nil }), } - fh, err := schedulertesting.NewFramework(registeredPlugins, corev1.DefaultSchedulerName) + fh, err := schedulertesting.NewFramework(context.TODO(), registeredPlugins, corev1.DefaultSchedulerName) assert.NoError(t, err) sched := &scheduler.Scheduler{ @@ -1016,7 +1018,7 @@ func Test_unscheduledReservationEventHandler(t *testing.T) { }, }, } - handler.OnAdd(reservation) + handler.OnAdd(reservation, true) assert.NotNil(t, adapt.Queue.Pods[string(reservation.UID)]) reservationCopy := reservation.DeepCopy() reservationCopy.ResourceVersion = "2" diff --git a/pkg/scheduler/frameworkext/framework_extender.go b/pkg/scheduler/frameworkext/framework_extender.go index ff61e2432..6df6646d0 100644 --- a/pkg/scheduler/frameworkext/framework_extender.go +++ b/pkg/scheduler/frameworkext/framework_extender.go @@ -234,7 +234,7 @@ func (ext *frameworkExtenderImpl) RunPostFilterPlugins(ctx context.Context, stat return result, status } -func (ext *frameworkExtenderImpl) RunScorePlugins(ctx context.Context, state *framework.CycleState, pod *corev1.Pod, nodes []*corev1.Node) (framework.PluginToNodeScores, *framework.Status) { +func (ext *frameworkExtenderImpl) RunScorePlugins(ctx context.Context, state *framework.CycleState, pod *corev1.Pod, nodes []*corev1.Node) ([]framework.NodePluginScores, *framework.Status) { for _, pl := range ext.configuredPlugins.Score.Enabled { transformer := ext.scoreTransformers[pl.Name] if transformer == nil { @@ -436,8 +436,8 @@ func (ext *frameworkExtenderImpl) RegisterForgetPodHandler(handler ForgetPodHand ext.forgetPodHandlers = append(ext.forgetPodHandlers, handler) } -func (ext *frameworkExtenderImpl) ForgetPod(pod *corev1.Pod) error { - if err := ext.Scheduler().GetCache().ForgetPod(pod); err != nil { +func (ext *frameworkExtenderImpl) ForgetPod(logger klog.Logger, pod *corev1.Pod) error { + if err := ext.Scheduler().GetCache().ForgetPod(logger, pod); err != nil { return err } for _, handler := range ext.forgetPodHandlers { diff --git a/pkg/scheduler/frameworkext/framework_extender_factory.go b/pkg/scheduler/frameworkext/framework_extender_factory.go index 6f130f7df..830b58609 100644 --- a/pkg/scheduler/frameworkext/framework_extender_factory.go +++ b/pkg/scheduler/frameworkext/framework_extender_factory.go @@ -18,6 +18,7 @@ package frameworkext import ( "context" + "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -143,11 +144,14 @@ func (f *FrameworkExtenderFactory) InitScheduler(sched Scheduler) { adaptor.Scheduler.SchedulePod = f.scheduleOne nextPod := adaptor.Scheduler.NextPod - adaptor.Scheduler.NextPod = func() *framework.QueuedPodInfo { - podInfo := nextPod() + adaptor.Scheduler.NextPod = func() (*framework.QueuedPodInfo, error) { + podInfo, err := nextPod() + if err != nil { + return podInfo, err + } // Deep 
copy podInfo to allow pod modification during scheduling podInfo = podInfo.DeepCopy() - return podInfo + return podInfo, nil } } } @@ -185,10 +189,10 @@ func (f *FrameworkExtenderFactory) scheduleOne(ctx context.Context, fwk framewor } func (f *FrameworkExtenderFactory) InterceptSchedulerError(sched *scheduler.Scheduler) { - f.errorHandlerDispatcher.setDefaultHandler(sched.Error) - sched.Error = func(info *framework.QueuedPodInfo, err error) { - f.errorHandlerDispatcher.Error(info, err) - f.monitor.Complete(info.Pod) + f.errorHandlerDispatcher.setDefaultHandler(sched.FailureHandler) + sched.FailureHandler = func(ctx context.Context, fwk framework.Framework, podInfo *framework.QueuedPodInfo, status *framework.Status, nominatingInfo *framework.NominatingInfo, start time.Time) { + f.errorHandlerDispatcher.Error(ctx, fwk, podInfo, status, nominatingInfo, start) + f.monitor.Complete(podInfo.Pod) } } diff --git a/pkg/scheduler/frameworkext/framework_extender_factory_test.go b/pkg/scheduler/frameworkext/framework_extender_factory_test.go index b9de56929..3ab78aa91 100644 --- a/pkg/scheduler/frameworkext/framework_extender_factory_test.go +++ b/pkg/scheduler/frameworkext/framework_extender_factory_test.go @@ -17,6 +17,7 @@ limitations under the License. package frameworkext import ( + "context" "testing" "github.com/gin-gonic/gin" @@ -55,6 +56,7 @@ func TestExtenderFactory(t *testing.T) { schedulertesting.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", frameworkruntime.WithSnapshotSharedLister(fakeNodeInfoLister{NodeInfoLister: frameworkfake.NodeInfoLister{}}), diff --git a/pkg/scheduler/frameworkext/framework_extender_test.go b/pkg/scheduler/frameworkext/framework_extender_test.go index 41dfb16bd..9c9503ae2 100644 --- a/pkg/scheduler/frameworkext/framework_extender_test.go +++ b/pkg/scheduler/frameworkext/framework_extender_test.go @@ -20,12 +20,15 @@ import ( "context" "errors" "fmt" + "sync" "testing" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/framework" frameworkfake "k8s.io/kubernetes/pkg/scheduler/framework/fake" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder" @@ -182,6 +185,14 @@ func (c fakeNodeInfoLister) NodeInfos() framework.NodeInfoLister { return c } +func (c fakeNodeInfoLister) StorageInfos() framework.StorageInfoLister { + return c +} + +func (c fakeNodeInfoLister) IsPVCUsedByPods(key string) bool { + return false +} + func Test_frameworkExtenderImpl_RunPreFilterPlugins(t *testing.T) { tests := []struct { name string @@ -208,6 +219,7 @@ func Test_frameworkExtenderImpl_RunPreFilterPlugins(t *testing.T) { })), } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", frameworkruntime.WithSnapshotSharedLister(fakeNodeInfoLister{NodeInfoLister: frameworkfake.NodeInfoLister{}}), @@ -256,12 +268,19 @@ func Test_frameworkExtenderImpl_RunFilterPluginsWithNominatedPods(t *testing.T) })), } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", + frameworkruntime.WithPodNominator(NewPodNominator()), ) assert.NoError(t, err) frameworkExtender := extenderFactory.NewFrameworkExtender(fh) frameworkExtender.SetConfiguredPlugins(fh.ListPlugins()) + tt.nodeInfo.SetNode(&corev1.Node{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "test-node-1", + }, + }) assert.Equal(t, tt.want, frameworkExtender.RunFilterPluginsWithNominatedPods(context.TODO(), framework.NewCycleState(), tt.pod, tt.nodeInfo)) assert.Len(t, tt.pod.Annotations, 2) expectedAnnotations := map[string]string{ @@ -273,21 +292,151 @@ func Test_frameworkExtenderImpl_RunFilterPluginsWithNominatedPods(t *testing.T) } } +// nominatedPodMap is a structure that stores pods nominated to run on nodes. +// It exists because nominatedNodeName of pod objects stored in the structure +// may be different than what scheduler has here. We should be able to find pods +// by their UID and update/delete them. +type nominatedPodMap struct { + // nominatedPods is a map keyed by a node name and the value is a list of + // pods which are nominated to run on the node. These are pods which can be in + // the activeQ or unschedulableQ. + nominatedPods map[string][]*framework.PodInfo + // nominatedPodToNode is map keyed by a Pod UID to the node name where it is + // nominated. + nominatedPodToNode map[types.UID]string + + sync.RWMutex +} + +func (npm *nominatedPodMap) add(pi *framework.PodInfo, nodeName string) { + // always delete the pod if it already exist, to ensure we never store more than + // one instance of the pod. + npm.delete(pi.Pod) + + nnn := nodeName + if len(nnn) == 0 { + nnn = NominatedNodeName(pi.Pod) + if len(nnn) == 0 { + return + } + } + npm.nominatedPodToNode[pi.Pod.UID] = nnn + for _, npi := range npm.nominatedPods[nnn] { + if npi.Pod.UID == pi.Pod.UID { + klog.V(4).InfoS("Pod already exists in the nominated map", "pod", klog.KObj(npi.Pod)) + return + } + } + npm.nominatedPods[nnn] = append(npm.nominatedPods[nnn], pi) +} + +func (npm *nominatedPodMap) delete(p *corev1.Pod) { + nnn, ok := npm.nominatedPodToNode[p.UID] + if !ok { + return + } + for i, np := range npm.nominatedPods[nnn] { + if np.Pod.UID == p.UID { + npm.nominatedPods[nnn] = append(npm.nominatedPods[nnn][:i], npm.nominatedPods[nnn][i+1:]...) + if len(npm.nominatedPods[nnn]) == 0 { + delete(npm.nominatedPods, nnn) + } + break + } + } + delete(npm.nominatedPodToNode, p.UID) +} + +// UpdateNominatedPod updates the with . +func (npm *nominatedPodMap) UpdateNominatedPod(logr klog.Logger, oldPod *corev1.Pod, newPodInfo *framework.PodInfo) { + npm.Lock() + defer npm.Unlock() + // In some cases, an Update event with no "NominatedNode" present is received right + // after a node("NominatedNode") is reserved for this pod in memory. + // In this case, we need to keep reserving the NominatedNode when updating the pod pointer. + nodeName := "" + // We won't fall into below `if` block if the Update event represents: + // (1) NominatedNode info is added + // (2) NominatedNode info is updated + // (3) NominatedNode info is removed + if NominatedNodeName(oldPod) == "" && NominatedNodeName(newPodInfo.Pod) == "" { + if nnn, ok := npm.nominatedPodToNode[oldPod.UID]; ok { + // This is the only case we should continue reserving the NominatedNode + nodeName = nnn + } + } + // We update irrespective of the nominatedNodeName changed or not, to ensure + // that pod pointer is updated. + npm.delete(oldPod) + npm.add(newPodInfo, nodeName) +} + +// NewPodNominator creates a nominatedPodMap as a backing of framework.PodNominator. +func NewPodNominator() framework.PodNominator { + return &nominatedPodMap{ + nominatedPods: make(map[string][]*framework.PodInfo), + nominatedPodToNode: make(map[types.UID]string), + } +} + +// NominatedNodeName returns nominated node name of a Pod. 
+func NominatedNodeName(pod *corev1.Pod) string { + return pod.Status.NominatedNodeName +} + +// DeleteNominatedPodIfExists deletes from nominatedPods. +func (npm *nominatedPodMap) DeleteNominatedPodIfExists(pod *corev1.Pod) { + npm.Lock() + npm.delete(pod) + npm.Unlock() +} + +// AddNominatedPod adds a pod to the nominated pods of the given node. +// This is called during the preemption process after a node is nominated to run +// the pod. We update the structure before sending a request to update the pod +// object to avoid races with the following scheduling cycles. +func (npm *nominatedPodMap) AddNominatedPod(logger klog.Logger, pi *framework.PodInfo, nominatingInfo *framework.NominatingInfo) { + npm.Lock() + npm.add(pi, nominatingInfo.NominatedNodeName) + npm.Unlock() +} + +// NominatedPodsForNode returns pods that are nominated to run on the given node, +// but they are waiting for other pods to be removed from the node. +func (npm *nominatedPodMap) NominatedPodsForNode(nodeName string) []*framework.PodInfo { + npm.RLock() + defer npm.RUnlock() + // TODO: we may need to return a copy of []*Pods to avoid modification + // on the caller side. + return npm.nominatedPods[nodeName] +} + func Test_frameworkExtenderImpl_RunScorePlugins(t *testing.T) { tests := []struct { name string pod *corev1.Pod nodes []*corev1.Node - wantScore framework.PluginToNodeScores + wantScore []framework.NodePluginScores wantStatus *framework.Status }{ { name: "normal RunScorePlugins", pod: &corev1.Pod{}, nodes: []*corev1.Node{{}}, - wantScore: framework.PluginToNodeScores{ - "T1": {{Name: "", Score: 0}}, - "T2": {{Name: "", Score: 0}}, + wantScore: []framework.NodePluginScores{ + { + Name: "", + Scores: []framework.PluginScore{ + { + Name: "T1", + Score: 0, + }, + { + Name: "T2", + Score: 0, + }, + }, + }, }, wantStatus: nil, }, @@ -306,6 +455,7 @@ func Test_frameworkExtenderImpl_RunScorePlugins(t *testing.T) { }), 1), } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", ) @@ -362,6 +512,7 @@ func TestPreBind(t *testing.T) { }), } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", ) @@ -412,6 +563,7 @@ func TestPreBindExtensionOrder(t *testing.T) { }), } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", ) @@ -522,6 +674,7 @@ func TestReservationRestorePlugin(t *testing.T) { schedulertesting.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", ) @@ -645,6 +798,7 @@ func TestReservationFilterPlugin(t *testing.T) { schedulertesting.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", ) @@ -797,6 +951,7 @@ func TestReservationScorePlugin(t *testing.T) { schedulertesting.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", ) diff --git a/pkg/scheduler/frameworkext/helper/forcesync_eventhandler.go b/pkg/scheduler/frameworkext/helper/forcesync_eventhandler.go index 73067e9c6..0552c87eb 100644 --- a/pkg/scheduler/frameworkext/helper/forcesync_eventhandler.go +++ b/pkg/scheduler/frameworkext/helper/forcesync_eventhandler.go @@ -54,7 +54,7 @@ func (h *forceSyncEventHandler) waitForSyncDone() { <-h.syncCh } -func (h *forceSyncEventHandler) OnAdd(obj interface{}) { +func (h 
*forceSyncEventHandler) OnAdd(obj interface{}, isInInitialList bool) { h.waitForSyncDone() if metaAccessor, ok := obj.(metav1.ObjectMetaAccessor); ok { objectMeta := metaAccessor.GetObjectMeta() @@ -69,7 +69,7 @@ func (h *forceSyncEventHandler) OnAdd(obj interface{}) { } } if h.handler != nil { - h.handler.OnAdd(obj) + h.handler.OnAdd(obj, isInInitialList) } } @@ -95,11 +95,11 @@ func (h *forceSyncEventHandler) OnDelete(obj interface{}) { } } -func (h *forceSyncEventHandler) addDirectly(obj interface{}) { +func (h *forceSyncEventHandler) addDirectly(obj interface{}, isInInitialList bool) { if h.handler == nil { return } - h.handler.OnAdd(obj) + h.handler.OnAdd(obj, isInInitialList) if metaAccessor, ok := obj.(metav1.ObjectMetaAccessor); ok { objectMeta := metaAccessor.GetObjectMeta() resourceVersion, err := strconv.ParseInt(objectMeta.GetResourceVersion(), 10, 64) @@ -124,17 +124,17 @@ func WithResyncPeriod(resyncPeriod time.Duration) Option { // ForceSyncFromInformer ensures that the EventHandler will synchronize data immediately after registration, // helping those plugins that need to build memory status through EventHandler to correctly synchronize data -func ForceSyncFromInformer(stopCh <-chan struct{}, cacheSyncer CacheSyncer, informer cache.SharedInformer, handler cache.ResourceEventHandler, options ...Option) { +func ForceSyncFromInformer(stopCh <-chan struct{}, cacheSyncer CacheSyncer, informer cache.SharedInformer, handler cache.ResourceEventHandler, options ...Option) (cache.ResourceEventHandlerRegistration, error) { syncEventHandler := newForceSyncEventHandler(handler, options...) - informer.AddEventHandlerWithResyncPeriod(syncEventHandler, syncEventHandler.resyncPeriod) + registration, err := informer.AddEventHandlerWithResyncPeriod(syncEventHandler, syncEventHandler.resyncPeriod) if cacheSyncer != nil { cacheSyncer.Start(stopCh) cacheSyncer.WaitForCacheSync(stopCh) } allObjects := informer.GetStore().List() for _, obj := range allObjects { - syncEventHandler.addDirectly(obj) + syncEventHandler.addDirectly(obj, true) } syncEventHandler.syncDone() - return + return registration, err } diff --git a/pkg/scheduler/frameworkext/helper/forcesync_informer.go b/pkg/scheduler/frameworkext/helper/forcesync_informer.go index 6ddc4c66c..7fad4fba1 100644 --- a/pkg/scheduler/frameworkext/helper/forcesync_informer.go +++ b/pkg/scheduler/frameworkext/helper/forcesync_informer.go @@ -40,14 +40,13 @@ func newForceSyncSharedIndexInformer(informer cache.SharedIndexInformer, default } } -func (s *forceSyncsharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) { - s.AddEventHandlerWithResyncPeriod(handler, s.defaultResync) +func (s *forceSyncsharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) { + return s.AddEventHandlerWithResyncPeriod(handler, s.defaultResync) } -func (s *forceSyncsharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { +func (s *forceSyncsharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) (cache.ResourceEventHandlerRegistration, error) { if _, ok := handler.(*forceSyncEventHandler); ok { - s.SharedIndexInformer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) - return + return s.SharedIndexInformer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) } - ForceSyncFromInformer(context.Background().Done(), s.factory, s.SharedIndexInformer, handler, 
WithResyncPeriod(resyncPeriod)) + return ForceSyncFromInformer(context.Background().Done(), s.factory, s.SharedIndexInformer, handler, WithResyncPeriod(resyncPeriod)) } diff --git a/pkg/scheduler/frameworkext/informers.go b/pkg/scheduler/frameworkext/informers.go index 499fe5428..a2f5b8efa 100644 --- a/pkg/scheduler/frameworkext/informers.go +++ b/pkg/scheduler/frameworkext/informers.go @@ -40,7 +40,6 @@ import ( "k8s.io/kubernetes/pkg/apis/storage" storagev1adapt "k8s.io/kubernetes/pkg/apis/storage/v1" storagev1beta1adapt "k8s.io/kubernetes/pkg/apis/storage/v1beta1" - "k8s.io/kubernetes/pkg/features" koordfeatures "github.com/koordinator-sh/koordinator/pkg/features" ) @@ -50,7 +49,7 @@ func SetupCustomInformers(informerFactory informers.SharedInformerFactory) { // Versions below k8s v1.22 need to disable CSIStorageCapacity disableCSIStorageCapacityInformer(informerFactory) } else if k8sfeature.DefaultFeatureGate.Enabled(koordfeatures.CompatibleCSIStorageCapacity) && - k8sfeature.DefaultFeatureGate.Enabled(features.CSIStorageCapacity) { + k8sfeature.DefaultFeatureGate.Enabled(koordfeatures.CSIStorageCapacity) { // The k8s v1.22 version needs to enable the FeatureGate to convert v1beta1.CSIStorageCapacity to v1.CSIStorageCapacity setupCompatibleCSICapacityInformer(informerFactory) } @@ -58,7 +57,7 @@ func SetupCustomInformers(informerFactory informers.SharedInformerFactory) { if k8sfeature.DefaultFeatureGate.Enabled(koordfeatures.DisablePodDisruptionBudgetInformer) { disablePodDisruptionBudgetInformer(informerFactory) } else if k8sfeature.DefaultFeatureGate.Enabled(koordfeatures.CompatiblePodDisruptionBudget) && - k8sfeature.DefaultFeatureGate.Enabled(features.PodDisruptionBudget) { + k8sfeature.DefaultFeatureGate.Enabled(koordfeatures.PodDisruptionBudget) { // Versions below k8s v1.22 need to enable the FeatureGate to convert v1beta1.PodDisruptionBudget to v1.PodDisruptionBudget setupCompatiblePodDisruptionBudgetInformer(informerFactory) } diff --git a/pkg/scheduler/frameworkext/interface.go b/pkg/scheduler/frameworkext/interface.go index ec9f35c1a..f8f35f420 100644 --- a/pkg/scheduler/frameworkext/interface.go +++ b/pkg/scheduler/frameworkext/interface.go @@ -22,6 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" schedconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework" @@ -46,7 +47,7 @@ type ExtendedHandle interface { // After handling scheduling errors, will execute PostErrorHandlerFilter, and if return true, other custom handlers will not be called. 
RegisterErrorHandlerFilters(preFilter PreErrorHandlerFilter, afterFilter PostErrorHandlerFilter) RegisterForgetPodHandler(handler ForgetPodHandler) - ForgetPod(pod *corev1.Pod) error + ForgetPod(logger klog.Logger, pod *corev1.Pod) error GetReservationNominator() ReservationNominator } diff --git a/pkg/scheduler/frameworkext/reservation_info.go b/pkg/scheduler/frameworkext/reservation_info.go index 7033d1b5c..592ee0c3e 100644 --- a/pkg/scheduler/frameworkext/reservation_info.go +++ b/pkg/scheduler/frameworkext/reservation_info.go @@ -56,7 +56,7 @@ type PodRequirement struct { } func NewPodRequirement(pod *corev1.Pod) *PodRequirement { - requests, _ := resource.PodRequestsAndLimits(pod) + requests := resource.PodRequests(pod, resource.PodResourcesOptions{}) ports := util.RequestedHostPorts(pod) return &PodRequirement{ Namespace: pod.Namespace, @@ -117,7 +117,7 @@ func NewReservationInfo(r *schedulingv1alpha1.Reservation) *ReservationInfo { func NewReservationInfoFromPod(pod *corev1.Pod) *ReservationInfo { var parseErrors []error - allocatable, _ := resource.PodRequestsAndLimits(pod) + allocatable := resource.PodRequests(pod, resource.PodResourcesOptions{}) resourceNames := quotav1.ResourceNames(allocatable) options, err := apiext.GetReservationRestrictedOptions(pod.Annotations) if err == nil { @@ -340,7 +340,7 @@ func (ri *ReservationInfo) UpdateReservation(r *schedulingv1alpha1.Reservation) } func (ri *ReservationInfo) UpdatePod(pod *corev1.Pod) { - ri.Allocatable, _ = resource.PodRequestsAndLimits(pod) + ri.Allocatable = resource.PodRequests(pod, resource.PodResourcesOptions{}) var parseErrors []error resourceNames := quotav1.ResourceNames(ri.Allocatable) options, err := apiext.GetReservationRestrictedOptions(pod.Annotations) diff --git a/pkg/scheduler/frameworkext/scheduler_adapter.go b/pkg/scheduler/frameworkext/scheduler_adapter.go index 54391963a..f102fee0e 100644 --- a/pkg/scheduler/frameworkext/scheduler_adapter.go +++ b/pkg/scheduler/frameworkext/scheduler_adapter.go @@ -22,6 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler" "k8s.io/kubernetes/pkg/scheduler/framework" ) @@ -49,27 +50,27 @@ type Scheduler interface { } type SchedulerCache interface { - AddPod(pod *corev1.Pod) error - UpdatePod(oldPod, newPod *corev1.Pod) error - RemovePod(pod *corev1.Pod) error - AssumePod(pod *corev1.Pod) error + AddPod(logger klog.Logger, pod *corev1.Pod) error + UpdatePod(logger klog.Logger, oldPod, newPod *corev1.Pod) error + RemovePod(logger klog.Logger, pod *corev1.Pod) error + AssumePod(logger klog.Logger, pod *corev1.Pod) error IsAssumedPod(pod *corev1.Pod) (bool, error) GetPod(pod *corev1.Pod) (*corev1.Pod, error) - ForgetPod(pod *corev1.Pod) error - InvalidNodeInfo(nodeName string) error + ForgetPod(logger klog.Logger, pod *corev1.Pod) error + InvalidNodeInfo(logger klog.Logger, nodeName string) error } type PreEnqueueCheck func(pod *corev1.Pod) bool type SchedulingQueue interface { - Add(pod *corev1.Pod) error - Update(oldPod, newPod *corev1.Pod) error + Add(logger klog.Logger, pod *corev1.Pod) error + Update(logger klog.Logger, oldPod, newPod *corev1.Pod) error Delete(pod *corev1.Pod) error - AddUnschedulableIfNotPresent(pod *framework.QueuedPodInfo, podSchedulingCycle int64) error + AddUnschedulableIfNotPresent(logger klog.Logger, pod *framework.QueuedPodInfo, podSchedulingCycle int64) error SchedulingCycle() int64 - AssignedPodAdded(pod *corev1.Pod) - 
AssignedPodUpdated(pod *corev1.Pod) - MoveAllToActiveOrBackoffQueue(event framework.ClusterEvent, preCheck PreEnqueueCheck) + AssignedPodAdded(logger klog.Logger, pod *corev1.Pod) + AssignedPodUpdated(logger klog.Logger, oldPod, newPod *corev1.Pod) + MoveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck) } var _ Scheduler = &SchedulerAdapter{} @@ -86,8 +87,8 @@ func (s *SchedulerAdapter) GetSchedulingQueue() SchedulingQueue { return &queueAdapter{s.Scheduler} } -func (s *SchedulerAdapter) MoveAllToActiveOrBackoffQueue(event framework.ClusterEvent, preCheck PreEnqueueCheck) { - s.Scheduler.SchedulingQueue.MoveAllToActiveOrBackoffQueue(event, func(pod *corev1.Pod) bool { +func (s *SchedulerAdapter) MoveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck) { + s.Scheduler.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, event, oldObj, newObj, func(pod *corev1.Pod) bool { if preCheck != nil { return preCheck(pod) } @@ -101,19 +102,19 @@ type cacheAdapter struct { scheduler *scheduler.Scheduler } -func (c *cacheAdapter) AddPod(pod *corev1.Pod) error { - return c.scheduler.Cache.AddPod(pod) +func (c *cacheAdapter) AddPod(logger klog.Logger, pod *corev1.Pod) error { + return c.scheduler.Cache.AddPod(logger, pod) } -func (c *cacheAdapter) UpdatePod(oldPod, newPod *corev1.Pod) error { - return c.scheduler.Cache.UpdatePod(oldPod, newPod) +func (c *cacheAdapter) UpdatePod(logger klog.Logger, oldPod, newPod *corev1.Pod) error { + return c.scheduler.Cache.UpdatePod(logger, oldPod, newPod) } -func (c *cacheAdapter) RemovePod(pod *corev1.Pod) error { - return c.scheduler.Cache.RemovePod(pod) +func (c *cacheAdapter) RemovePod(logger klog.Logger, pod *corev1.Pod) error { + return c.scheduler.Cache.RemovePod(logger, pod) } -func (c *cacheAdapter) AssumePod(pod *corev1.Pod) error { - return c.scheduler.Cache.AssumePod(pod) +func (c *cacheAdapter) AssumePod(logger klog.Logger, pod *corev1.Pod) error { + return c.scheduler.Cache.AssumePod(logger, pod) } func (c *cacheAdapter) IsAssumedPod(pod *corev1.Pod) (bool, error) { @@ -124,20 +125,20 @@ func (c *cacheAdapter) GetPod(pod *corev1.Pod) (*corev1.Pod, error) { return c.scheduler.Cache.GetPod(pod) } -func (c *cacheAdapter) ForgetPod(pod *corev1.Pod) error { - return c.scheduler.Cache.ForgetPod(pod) +func (c *cacheAdapter) ForgetPod(logger klog.Logger, pod *corev1.Pod) error { + return c.scheduler.Cache.ForgetPod(logger, pod) } -func (c *cacheAdapter) InvalidNodeInfo(nodeName string) error { +func (c *cacheAdapter) InvalidNodeInfo(logger klog.Logger, nodeName string) error { val := podPool.Get() defer podPool.Put(val) pod := val.(*corev1.Pod) pod.Spec.NodeName = nodeName - err := c.scheduler.Cache.AddPod(pod) + err := c.scheduler.Cache.AddPod(logger, pod) if err != nil { return err } - return c.scheduler.Cache.RemovePod(pod) + return c.scheduler.Cache.RemovePod(logger, pod) } var _ SchedulingQueue = &queueAdapter{} @@ -146,36 +147,36 @@ type queueAdapter struct { scheduler *scheduler.Scheduler } -func (q *queueAdapter) Add(pod *corev1.Pod) error { - return q.scheduler.SchedulingQueue.Add(pod) +func (q *queueAdapter) Add(logger klog.Logger, pod *corev1.Pod) error { + return q.scheduler.SchedulingQueue.Add(logger, pod) } -func (q *queueAdapter) Update(oldPod, newPod *corev1.Pod) error { - return q.scheduler.SchedulingQueue.Update(oldPod, newPod) +func (q *queueAdapter) Update(logger klog.Logger, oldPod, 
newPod *corev1.Pod) error { + return q.scheduler.SchedulingQueue.Update(logger, oldPod, newPod) } func (q *queueAdapter) Delete(pod *corev1.Pod) error { return q.scheduler.SchedulingQueue.Delete(pod) } -func (q *queueAdapter) AddUnschedulableIfNotPresent(pod *framework.QueuedPodInfo, podSchedulingCycle int64) error { - return q.scheduler.SchedulingQueue.AddUnschedulableIfNotPresent(pod, podSchedulingCycle) +func (q *queueAdapter) AddUnschedulableIfNotPresent(logger klog.Logger, pInfo *framework.QueuedPodInfo, podSchedulingCycle int64) error { + return q.scheduler.SchedulingQueue.AddUnschedulableIfNotPresent(logger, pInfo, podSchedulingCycle) } func (q *queueAdapter) SchedulingCycle() int64 { return q.scheduler.SchedulingQueue.SchedulingCycle() } -func (q *queueAdapter) AssignedPodAdded(pod *corev1.Pod) { - q.scheduler.SchedulingQueue.AssignedPodAdded(pod) +func (q *queueAdapter) AssignedPodAdded(logger klog.Logger, pod *corev1.Pod) { + q.scheduler.SchedulingQueue.AssignedPodAdded(logger, pod) } -func (q *queueAdapter) AssignedPodUpdated(pod *corev1.Pod) { - q.scheduler.SchedulingQueue.AssignedPodUpdated(pod) +func (q *queueAdapter) AssignedPodUpdated(logger klog.Logger, oldPod, newPod *corev1.Pod) { + q.scheduler.SchedulingQueue.AssignedPodUpdated(logger, oldPod, newPod) } -func (q *queueAdapter) MoveAllToActiveOrBackoffQueue(event framework.ClusterEvent, preCheck PreEnqueueCheck) { - q.scheduler.SchedulingQueue.MoveAllToActiveOrBackoffQueue(event, func(pod *corev1.Pod) bool { +func (q *queueAdapter) MoveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck) { + q.scheduler.SchedulingQueue.MoveAllToActiveOrBackoffQueue(logger, event, oldObj, newObj, func(pod *corev1.Pod) bool { if preCheck != nil { return preCheck(pod) } @@ -223,26 +224,26 @@ func (f *FakeScheduler) GetSchedulingQueue() SchedulingQueue { return f.Queue } -func (f *FakeScheduler) AddPod(pod *corev1.Pod) error { +func (f *FakeScheduler) AddPod(logger klog.Logger, pod *corev1.Pod) error { key, _ := framework.GetPodKey(pod) f.Pods[key] = pod delete(f.AssumedPod, key) return nil } -func (f *FakeScheduler) UpdatePod(oldPod, newPod *corev1.Pod) error { +func (f *FakeScheduler) UpdatePod(logger klog.Logger, oldPod, newPod *corev1.Pod) error { key, _ := framework.GetPodKey(newPod) f.Pods[key] = newPod return nil } -func (f *FakeScheduler) RemovePod(pod *corev1.Pod) error { +func (f *FakeScheduler) RemovePod(logger klog.Logger, pod *corev1.Pod) error { key, _ := framework.GetPodKey(pod) delete(f.Pods, key) return nil } -func (f *FakeScheduler) AssumePod(pod *corev1.Pod) error { +func (f *FakeScheduler) AssumePod(logger klog.Logger, pod *corev1.Pod) error { key, _ := framework.GetPodKey(pod) f.AssumedPod[key] = pod return nil @@ -260,13 +261,13 @@ func (f *FakeScheduler) GetPod(pod *corev1.Pod) (*corev1.Pod, error) { return p, nil } -func (f *FakeScheduler) ForgetPod(pod *corev1.Pod) error { +func (f *FakeScheduler) ForgetPod(logger klog.Logger, pod *corev1.Pod) error { key, _ := framework.GetPodKey(pod) delete(f.AssumedPod, key) return nil } -func (f *FakeScheduler) InvalidNodeInfo(nodeName string) error { +func (f *FakeScheduler) InvalidNodeInfo(logger klog.Logger, nodeName string) error { val := podPool.Get() defer podPool.Put(val) pod := val.(*corev1.Pod) @@ -283,13 +284,13 @@ func (f *FakeScheduler) InvalidNodeInfo(nodeName string) error { return nodeInfo.RemovePod(pod) } -func (f *FakeQueue) Add(pod *corev1.Pod) error { +func (f *FakeQueue) Add(logger 
klog.Logger, pod *corev1.Pod) error { key, _ := framework.GetPodKey(pod) f.Pods[key] = pod return nil } -func (f *FakeQueue) Update(oldPod, newPod *corev1.Pod) error { +func (f *FakeQueue) Update(logger klog.Logger, oldPod, newPod *corev1.Pod) error { key, _ := framework.GetPodKey(newPod) f.Pods[key] = newPod return nil @@ -302,7 +303,7 @@ func (f *FakeQueue) Delete(pod *corev1.Pod) error { return nil } -func (f *FakeQueue) AddUnschedulableIfNotPresent(pod *framework.QueuedPodInfo, podSchedulingCycle int64) error { +func (f *FakeQueue) AddUnschedulableIfNotPresent(logger klog.Logger, pod *framework.QueuedPodInfo, podSchedulingCycle int64) error { key, _ := framework.GetPodKey(pod.Pod) f.UnschedulablePods[key] = pod.Pod return nil @@ -312,16 +313,16 @@ func (f *FakeQueue) SchedulingCycle() int64 { return 0 } -func (f *FakeQueue) AssignedPodAdded(pod *corev1.Pod) { +func (f *FakeQueue) AssignedPodAdded(logger klog.Logger, pod *corev1.Pod) { key, _ := framework.GetPodKey(pod) f.AssignedPods[key] = pod } -func (f *FakeQueue) AssignedPodUpdated(pod *corev1.Pod) { - key, _ := framework.GetPodKey(pod) - f.AssignedUpdatedPods[key] = pod +func (f *FakeQueue) AssignedPodUpdated(logger klog.Logger, oldPod, newPod *corev1.Pod) { + key, _ := framework.GetPodKey(newPod) + f.AssignedUpdatedPods[key] = newPod } -func (f *FakeQueue) MoveAllToActiveOrBackoffQueue(event framework.ClusterEvent, preCheck PreEnqueueCheck) { +func (f *FakeQueue) MoveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck) { } diff --git a/pkg/scheduler/frameworkext/testing/fake_reservation_nominator.go b/pkg/scheduler/frameworkext/testing/fake_reservation_nominator.go index 5b13c2e3a..67459abee 100644 --- a/pkg/scheduler/frameworkext/testing/fake_reservation_nominator.go +++ b/pkg/scheduler/frameworkext/testing/fake_reservation_nominator.go @@ -107,7 +107,8 @@ func (nm *FakeNominator) AddNominatedReservePod(rInfo *corev1.Pod, nodeName stri return } } - nm.nominatedReservePod[nodeName] = append(nm.nominatedReservePod[nodeName], framework.NewPodInfo(rInfo)) + podInfo, _ := framework.NewPodInfo(rInfo) + nm.nominatedReservePod[nodeName] = append(nm.nominatedReservePod[nodeName], podInfo) } func (nm *FakeNominator) DeleteNominatedReservePod(rInfo *corev1.Pod) { diff --git a/pkg/scheduler/plugins/coscheduling/controller/podgroup.go b/pkg/scheduler/plugins/coscheduling/controller/podgroup.go index 6fd815f25..3acdb1329 100644 --- a/pkg/scheduler/plugins/coscheduling/controller/podgroup.go +++ b/pkg/scheduler/plugins/coscheduling/controller/podgroup.go @@ -18,14 +18,13 @@ limitations under the License. 
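Note: framework.NewPodInfo now returns an error alongside the PodInfo, which is why the nominator hunk above captures two values. A hedged sketch of a defensive wrapper; the helper name is hypothetical and the fake nominator above simply discards the error, which is acceptable only in test fixtures.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// newPodInfoOrLog builds a PodInfo and logs, rather than silently drops,
// any construction error before returning the (possibly partial) result.
func newPodInfoOrLog(pod *corev1.Pod) *framework.PodInfo {
	podInfo, err := framework.NewPodInfo(pod)
	if err != nil {
		klog.ErrorS(err, "failed to build PodInfo", "pod", klog.KObj(pod))
	}
	return podInfo
}
```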
package controller import ( + "context" "fmt" + "reflect" "sort" "strings" "time" - "context" - "reflect" - v1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,11 +36,11 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - schedv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned" - schedinformer "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1" - schedlister "sigs.k8s.io/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1" + schedv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + schedclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + schedinformer "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1" + schedlister "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/coscheduling/core" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/coscheduling/util" ) diff --git a/pkg/scheduler/plugins/coscheduling/controller/podgroup_test.go b/pkg/scheduler/plugins/coscheduling/controller/podgroup_test.go index 0200d4909..43639003f 100644 --- a/pkg/scheduler/plugins/coscheduling/controller/podgroup_test.go +++ b/pkg/scheduler/plugins/coscheduling/controller/podgroup_test.go @@ -32,10 +32,10 @@ import ( "k8s.io/kubernetes/pkg/controller" st "k8s.io/kubernetes/pkg/scheduler/testing" "k8s.io/utils/pointer" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - pgfake "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned/fake" - schedinformer "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + pgfake "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake" + schedinformer "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions" koordfake "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned/fake" koordinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" diff --git a/pkg/scheduler/plugins/coscheduling/core/core.go b/pkg/scheduler/plugins/coscheduling/core/core.go index 942a965fa..80b188731 100644 --- a/pkg/scheduler/plugins/coscheduling/core/core.go +++ b/pkg/scheduler/plugins/coscheduling/core/core.go @@ -32,12 +32,12 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/framework" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - pgclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned" - pgformers "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions" - pglister "sigs.k8s.io/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + pgclientset 
"github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + pgformers "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions" + pglister "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1" koordinatorinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" frameworkexthelper "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext/helper" @@ -494,7 +494,10 @@ func (pgMgr *PodGroupManager) GetCreatTime(podInfo *framework.QueuedPodInfo) tim // first check if the pod belongs to the Gang // it doesn't belong to the gang,we get the creation time of the pod if !util.IsPodNeedGang(podInfo.Pod) { - return podInfo.InitialAttemptTimestamp + if podInfo.InitialAttemptTimestamp == nil { + return time.Now() + } + return *podInfo.InitialAttemptTimestamp } gang := pgMgr.GetGangByPod(podInfo.Pod) // it belongs to a gang,we get the creation time of the Gang diff --git a/pkg/scheduler/plugins/coscheduling/core/core_test.go b/pkg/scheduler/plugins/coscheduling/core/core_test.go index 0f8b34a93..3b28e87be 100644 --- a/pkg/scheduler/plugins/coscheduling/core/core_test.go +++ b/pkg/scheduler/plugins/coscheduling/core/core_test.go @@ -31,12 +31,12 @@ import ( "k8s.io/client-go/util/retry" "k8s.io/kubernetes/pkg/scheduler/framework" st "k8s.io/kubernetes/pkg/scheduler/testing" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - fakepgclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned/fake" - pgformers "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions" - pginformer "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + fakepgclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake" + pgformers "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions" + pginformer "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1" koordfake "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned/fake" koordinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" diff --git a/pkg/scheduler/plugins/coscheduling/core/gang.go b/pkg/scheduler/plugins/coscheduling/core/gang.go index caa7c174a..40c47c457 100644 --- a/pkg/scheduler/plugins/coscheduling/core/gang.go +++ b/pkg/scheduler/plugins/coscheduling/core/gang.go @@ -23,9 +23,9 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/klog/v2" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/coscheduling/util" ) diff --git a/pkg/scheduler/plugins/coscheduling/core/gang_cache.go b/pkg/scheduler/plugins/coscheduling/core/gang_cache.go index 7332f66e5..a1a269174 100644 
--- a/pkg/scheduler/plugins/coscheduling/core/gang_cache.go +++ b/pkg/scheduler/plugins/coscheduling/core/gang_cache.go @@ -22,10 +22,10 @@ import ( v1 "k8s.io/api/core/v1" listerv1 "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - pgclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned" - pglister "sigs.k8s.io/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + pgclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + pglister "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/coscheduling/util" koordutil "github.com/koordinator-sh/koordinator/pkg/util" diff --git a/pkg/scheduler/plugins/coscheduling/core/gang_cache_test.go b/pkg/scheduler/plugins/coscheduling/core/gang_cache_test.go index e8c7ee0f8..2340fcf1e 100644 --- a/pkg/scheduler/plugins/coscheduling/core/gang_cache_test.go +++ b/pkg/scheduler/plugins/coscheduling/core/gang_cache_test.go @@ -27,13 +27,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" "k8s.io/utils/pointer" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - fakepgclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned/fake" - pgformers "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + fakepgclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake" + pgformers "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" - "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta2" + "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/coscheduling/util" ) @@ -44,10 +44,10 @@ var fakeTimeNowFn = func() time.Time { } func getTestDefaultCoschedulingArgs(t *testing.T) *config.CoschedulingArgs { - var v1beta2args v1beta2.CoschedulingArgs - v1beta2.SetDefaults_CoschedulingArgs(&v1beta2args) + var v1beta3args v1beta3.CoschedulingArgs + v1beta3.SetDefaults_CoschedulingArgs(&v1beta3args) var args config.CoschedulingArgs - err := v1beta2.Convert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(&v1beta2args, &args, nil) + err := v1beta3.Convert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(&v1beta3args, &args, nil) assert.NoError(t, err) return &args } diff --git a/pkg/scheduler/plugins/coscheduling/coscheduling.go b/pkg/scheduler/plugins/coscheduling/coscheduling.go index 66a41f8e0..debc52cea 100644 --- a/pkg/scheduler/plugins/coscheduling/coscheduling.go +++ b/pkg/scheduler/plugins/coscheduling/coscheduling.go @@ -27,12 +27,12 @@ import ( corev1helpers "k8s.io/component-helpers/scheduling/corev1" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/framework" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling" - pgclientset 
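Note: the test fixtures now default and convert plugin arguments through the v1beta3 config API rather than v1beta2. The pattern, reassembled from the hunks above using this repository's own package paths; the helper name is illustrative.

```go
package example

import (
	"github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config"
	"github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3"
)

// defaultCoschedulingArgs applies the v1beta3 defaults and converts the
// result into the internal config type consumed by the plugin.
func defaultCoschedulingArgs() (*config.CoschedulingArgs, error) {
	var external v1beta3.CoschedulingArgs
	v1beta3.SetDefaults_CoschedulingArgs(&external)

	var args config.CoschedulingArgs
	if err := v1beta3.Convert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(&external, &args, nil); err != nil {
		return nil, err
	}
	return &args, nil
}
```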
"sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned" - pgformers "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions" - schedinformers "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling" + pgclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + pgformers "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions" + schedinformers "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/validation" "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext" @@ -95,13 +95,13 @@ func New(obj runtime.Object, handle framework.Handle) (framework.Plugin, error) return plugin, nil } -func (cs *Coscheduling) EventsToRegister() []framework.ClusterEvent { +func (cs *Coscheduling) EventsToRegister() []framework.ClusterEventWithHint { // To register a custom event, follow the naming convention at: // https://git.k8s.io/kubernetes/pkg/scheduler/eventhandlers.go#L403-L410 pgGVK := fmt.Sprintf("podgroups.v1alpha1.%v", scheduling.GroupName) - return []framework.ClusterEvent{ - {Resource: framework.Pod, ActionType: framework.Add}, - {Resource: framework.GVK(pgGVK), ActionType: framework.Add | framework.Update}, + return []framework.ClusterEventWithHint{ + {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Add}}, + {Event: framework.ClusterEvent{Resource: framework.GVK(pgGVK), ActionType: framework.Add | framework.Update}}, } } diff --git a/pkg/scheduler/plugins/coscheduling/coscheduling_test.go b/pkg/scheduler/plugins/coscheduling/coscheduling_test.go index 5440d818b..8aed75518 100644 --- a/pkg/scheduler/plugins/coscheduling/coscheduling_test.go +++ b/pkg/scheduler/plugins/coscheduling/coscheduling_test.go @@ -36,8 +36,9 @@ import ( "k8s.io/client-go/kubernetes" kubefake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/events" + "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - "k8s.io/kube-scheduler/config/v1beta3" + scheduledv1beta3 "k8s.io/kube-scheduler/config/v1beta3" "k8s.io/kubernetes/pkg/scheduler" scheduledconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing" @@ -49,15 +50,15 @@ import ( schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" st "k8s.io/kubernetes/pkg/scheduler/testing" "k8s.io/utils/pointer" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - pgclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned" - fakepgclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned/fake" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + pgclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + fakepgclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake" koordfake "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned/fake" 
koordinatorinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" - "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta2" + "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3" "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/coscheduling/core" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/coscheduling/util" @@ -149,6 +150,14 @@ func (f *testSharedLister) NodeInfos() framework.NodeInfoLister { return f } +func (f *testSharedLister) StorageInfos() framework.StorageInfoLister { + return f +} + +func (f *testSharedLister) IsPVCUsedByPods(key string) bool { + return false +} + func (f *testSharedLister) List() ([]*framework.NodeInfo, error) { return f.nodeInfos, nil } @@ -173,10 +182,10 @@ type pluginTestSuit struct { } func newPluginTestSuit(t *testing.T, nodes []*corev1.Node, pgClientSet pgclientset.Interface, cs kubernetes.Interface) *pluginTestSuit { - var v1beta2args v1beta2.CoschedulingArgs - v1beta2.SetDefaults_CoschedulingArgs(&v1beta2args) + var v1beta3args v1beta3.CoschedulingArgs + v1beta3.SetDefaults_CoschedulingArgs(&v1beta3args) var gangSchedulingArgs config.CoschedulingArgs - err := v1beta2.Convert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(&v1beta2args, &gangSchedulingArgs, nil) + err := v1beta3.Convert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(&v1beta3args, &gangSchedulingArgs, nil) assert.NoError(t, err) gangSchedulingPluginConfig := scheduledconfig.PluginConfig{ @@ -200,15 +209,19 @@ func newPluginTestSuit(t *testing.T, nodes []*corev1.Node, pgClientSet pgclients schedulertesting.RegisterPermitPlugin(Name, proxyNew), schedulertesting.RegisterPluginAsExtensions(Name, proxyNew, "PostBind"), } + fakeRecorder := record.NewFakeRecorder(1024) + eventRecorder := record.NewEventRecorderAdapter(fakeRecorder) informerFactory := informers.NewSharedInformerFactory(cs, 0) snapshot := newTestSharedLister(nil, nodes) fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", runtime.WithClientSet(cs), runtime.WithInformerFactory(informerFactory), runtime.WithSnapshotSharedLister(snapshot), + runtime.WithEventRecorder(eventRecorder), ) assert.Nil(t, err) return &pluginTestSuit{ @@ -225,6 +238,12 @@ func (p *pluginTestSuit) start() { p.Handle.SharedInformerFactory().WaitForCacheSync(ctx.Done()) } +func NewPodInfo(t *testing.T, pod *corev1.Pod) *framework.PodInfo { + podInfo, err := framework.NewPodInfo(pod) + assert.NoError(t, err) + return podInfo +} + func TestLess(t *testing.T) { { //pod priority @@ -991,15 +1010,15 @@ func TestFairness(t *testing.T) { assert.NoError(t, err) } - cfg := configtesting.V1beta3ToInternalWithDefaults(t, v1beta3.KubeSchedulerConfiguration{ - Profiles: []v1beta3.KubeSchedulerProfile{{ + cfg := configtesting.V1beta3ToInternalWithDefaults(t, scheduledv1beta3.KubeSchedulerConfiguration{ + Profiles: []scheduledv1beta3.KubeSchedulerProfile{{ SchedulerName: pointer.StringPtr("koord-scheduler"), - Plugins: &v1beta3.Plugins{ - QueueSort: v1beta3.PluginSet{ - Enabled: []v1beta3.Plugin{ + Plugins: &scheduledv1beta3.Plugins{ + QueueSort: scheduledv1beta3.PluginSet{ + Enabled: []scheduledv1beta3.Plugin{ {Name: "fakeQueueSortPlugin"}, }, - Disabled: []v1beta3.Plugin{ + Disabled: []scheduledv1beta3.Plugin{ {Name: "*"}, }, }, @@ -1016,11 +1035,11 @@ func TestFairness(t *testing.T) { 
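Note: schedulertesting.NewFramework now takes a context as its first argument, and these tests also hand it an event recorder built from a fake. A consolidated sketch of that wiring; the helper name and parameter list are illustrative, the calls themselves mirror the test hunks above.

```go
package example

import (
	"context"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
)

// newTestFramework wires up the pieces the updated call sites need: a
// context, an informer factory, and a fake recorder adapted to the
// events.EventRecorder interface expected by the framework runtime.
func newTestFramework(ctx context.Context, plugins []schedulertesting.RegisterPluginFunc, cs kubernetes.Interface) (framework.Framework, error) {
	eventRecorder := record.NewEventRecorderAdapter(record.NewFakeRecorder(1024))
	informerFactory := informers.NewSharedInformerFactory(cs, 0)
	return schedulertesting.NewFramework(
		ctx,
		plugins,
		"koord-scheduler",
		runtime.WithClientSet(cs),
		runtime.WithInformerFactory(informerFactory),
		runtime.WithEventRecorder(eventRecorder),
	)
}
```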
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: cs.EventsV1()}) ctx := context.TODO() sched, err := scheduler.New( + ctx, cs, suit.SharedInformerFactory(), nil, profile.NewRecorderFactory(eventBroadcaster), - ctx.Done(), scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(registry), scheduler.WithPodInitialBackoffSeconds(0), @@ -1030,7 +1049,8 @@ func TestFairness(t *testing.T) { assert.NoError(t, err) eventBroadcaster.StartRecordingToSink(ctx.Done()) suit.start() - sched.SchedulingQueue.Run() + logger := klog.FromContext(ctx) + sched.SchedulingQueue.Run(logger) var scheduleOrder []*debugPodScheduleInfo @@ -1099,7 +1119,7 @@ func (p *debugPodScheduleInfo) String() string { } func simulateScheduleOne(t *testing.T, ctx context.Context, sched *scheduler.Scheduler, suit *pluginTestSuit, scheduleOrder *[]*debugPodScheduleInfo, injectFilterErr func(pod *corev1.Pod) bool) { - podInfo := sched.NextPod() + podInfo, _ := sched.NextPod() pod := podInfo.Pod scheduleInfo := &debugPodScheduleInfo{ @@ -1135,9 +1155,9 @@ func simulateScheduleOne(t *testing.T, ctx context.Context, sched *scheduler.Sch // Run PostFilter plugins to try to make the pod schedulable in a future scheduling cycle. _, status := suit.plugin.(*Coscheduling).PostFilter(ctx, state, pod, fitError.Diagnosis.NodeToStatusMap) assert.False(t, status.IsSuccess()) + klog.Info("sched.Error:" + podInfo.Pod.Name) + sched.FailureHandler(ctx, fwk, podInfo, status, &framework.NominatingInfo{}, time.Time{}) } - klog.Info("sched.Error:" + podInfo.Pod.Name) - sched.Error(podInfo, err) return } @@ -1154,13 +1174,13 @@ func simulateScheduleOne(t *testing.T, ctx context.Context, sched *scheduler.Sch // One of the plugins returned status different from success or wait. fwk.RunReservePluginsUnreserve(schedulingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost) klog.Info("sched.Error:" + assumedPodInfo.Pod.Name) - sched.Error(assumedPodInfo, runPermitStatus.AsError()) + sched.FailureHandler(ctx, fwk, assumedPodInfo, runPermitStatus, &framework.NominatingInfo{}, time.Time{}) return } // At the end of a successful scheduling cycle, pop and move up Pods if needed. if len(podsToActivate.Map) != 0 { - sched.SchedulingQueue.Activate(podsToActivate.Map) + sched.SchedulingQueue.Activate(klog.FromContext(ctx), podsToActivate.Map) // Clear the entries after activation. podsToActivate.Map = make(map[string]*corev1.Pod) } @@ -1175,7 +1195,7 @@ func simulateScheduleOne(t *testing.T, ctx context.Context, sched *scheduler.Sch // trigger un-reserve plugins to clean up state associated with the reserved Pod fwk.RunReservePluginsUnreserve(bindingCycleCtx, state, assumedPod, scheduleResult.SuggestedHost) klog.Info("sched.Error:" + assumedPodInfo.Pod.Name) - sched.Error(assumedPodInfo, waitOnPermitStatus.AsError()) + sched.FailureHandler(ctx, fwk, assumedPodInfo, waitOnPermitStatus, &framework.NominatingInfo{}, time.Time{}) return } @@ -1184,7 +1204,8 @@ func simulateScheduleOne(t *testing.T, ctx context.Context, sched *scheduler.Sch // At the end of a successful binding cycle, move up Pods if needed. if len(podsToActivate.Map) != 0 { - sched.SchedulingQueue.Activate(podsToActivate.Map) + logger := klog.FromContext(ctx) + sched.SchedulingQueue.Activate(logger, podsToActivate.Map) // Unlike the logic in scheduling cycle, we don't bother deleting the entries // as `podsToActivate.Map` is no longer consumed. 
} @@ -1194,7 +1215,7 @@ func simulateScheduleOne(t *testing.T, ctx context.Context, sched *scheduler.Sch func schedulePod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *corev1.Pod, info *debugPodScheduleInfo, injectFilterError bool) (result scheduler.ScheduleResult, err error) { diagnosis := framework.Diagnosis{ NodeToStatusMap: make(framework.NodeToStatusMap), - UnschedulablePlugins: sets.NewString(), + UnschedulablePlugins: sets.Set[string]{}, } // Run "prefilter" plugins. @@ -1266,15 +1287,15 @@ func TestDeadLockFree(t *testing.T) { assert.NoError(t, err) } - cfg := configtesting.V1beta3ToInternalWithDefaults(t, v1beta3.KubeSchedulerConfiguration{ - Profiles: []v1beta3.KubeSchedulerProfile{{ + cfg := configtesting.V1beta3ToInternalWithDefaults(t, scheduledv1beta3.KubeSchedulerConfiguration{ + Profiles: []scheduledv1beta3.KubeSchedulerProfile{{ SchedulerName: pointer.StringPtr("koord-scheduler"), - Plugins: &v1beta3.Plugins{ - QueueSort: v1beta3.PluginSet{ - Enabled: []v1beta3.Plugin{ + Plugins: &scheduledv1beta3.Plugins{ + QueueSort: scheduledv1beta3.PluginSet{ + Enabled: []scheduledv1beta3.Plugin{ {Name: "fakeQueueSortPlugin"}, }, - Disabled: []v1beta3.Plugin{ + Disabled: []scheduledv1beta3.Plugin{ {Name: "*"}, }, }, @@ -1290,11 +1311,11 @@ func TestDeadLockFree(t *testing.T) { eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: cs.EventsV1()}) ctx := context.TODO() - sched, err := scheduler.New(cs, + sched, err := scheduler.New(ctx, + cs, suit.SharedInformerFactory(), nil, profile.NewRecorderFactory(eventBroadcaster), - ctx.Done(), scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(registry), scheduler.WithPodInitialBackoffSeconds(1), @@ -1305,13 +1326,14 @@ func TestDeadLockFree(t *testing.T) { assert.NoError(t, err) eventBroadcaster.StartRecordingToSink(ctx.Done()) suit.start() - sched.SchedulingQueue.Run() + sched.SchedulingQueue.Run(klog.FromContext(ctx)) var scheduleOrder []*debugPodScheduleInfo for i := 0; i < 3; i++ { for j := 0; j < len(allPods); j++ { - if len(sched.SchedulingQueue.PendingPods()) == 0 { + pendingPods, _ := sched.SchedulingQueue.PendingPods() + if len(pendingPods) == 0 { break } @@ -1328,7 +1350,8 @@ func TestDeadLockFree(t *testing.T) { }) } } - assert.Equal(t, 0, len(sched.SchedulingQueue.PendingPods())) + pendingPods, _ := sched.SchedulingQueue.PendingPods() + assert.Equal(t, 0, len(pendingPods)) for _, info := range scheduleOrder { klog.Infoln(info) } @@ -1382,15 +1405,15 @@ func TestNoRejectWhenInvalidCycle(t *testing.T) { assert.NoError(t, err) } - cfg := configtesting.V1beta3ToInternalWithDefaults(t, v1beta3.KubeSchedulerConfiguration{ - Profiles: []v1beta3.KubeSchedulerProfile{{ + cfg := configtesting.V1beta3ToInternalWithDefaults(t, scheduledv1beta3.KubeSchedulerConfiguration{ + Profiles: []scheduledv1beta3.KubeSchedulerProfile{{ SchedulerName: pointer.StringPtr("koord-scheduler"), - Plugins: &v1beta3.Plugins{ - QueueSort: v1beta3.PluginSet{ - Enabled: []v1beta3.Plugin{ + Plugins: &scheduledv1beta3.Plugins{ + QueueSort: scheduledv1beta3.PluginSet{ + Enabled: []scheduledv1beta3.Plugin{ {Name: "fakeQueueSortPlugin"}, }, - Disabled: []v1beta3.Plugin{ + Disabled: []scheduledv1beta3.Plugin{ {Name: "*"}, }, }, @@ -1406,11 +1429,12 @@ func TestNoRejectWhenInvalidCycle(t *testing.T) { eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: cs.EventsV1()}) ctx := context.TODO() - sched, err := scheduler.New(cs, + sched, err := scheduler.New( + ctx, + cs, 
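Note: the Diagnosis field above switches from sets.NewString() to the generic sets.Set[string]. A tiny sketch of the replacement constructor (helper name is illustrative):

```go
package example

import "k8s.io/apimachinery/pkg/util/sets"

// unschedulablePlugins builds the generic string set that replaces the old
// sets.NewString() constructor; sets.New is variadic, like its predecessor.
func unschedulablePlugins(names ...string) sets.Set[string] {
	return sets.New[string](names...)
}
```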
suit.SharedInformerFactory(), nil, profile.NewRecorderFactory(eventBroadcaster), - ctx.Done(), scheduler.WithProfiles(cfg.Profiles...), scheduler.WithFrameworkOutOfTreeRegistry(registry), scheduler.WithPodInitialBackoffSeconds(1), @@ -1421,7 +1445,8 @@ func TestNoRejectWhenInvalidCycle(t *testing.T) { assert.NoError(t, err) eventBroadcaster.StartRecordingToSink(ctx.Done()) suit.start() - sched.SchedulingQueue.Run() + logger := klog.FromContext(ctx) + sched.SchedulingQueue.Run(logger) var scheduleOrder []*debugPodScheduleInfo @@ -1433,7 +1458,8 @@ func TestNoRejectWhenInvalidCycle(t *testing.T) { for i := 0; i < 5; i++ { for j := 0; j < len(allPods); j++ { - if len(sched.SchedulingQueue.PendingPods()) == 0 { + pendingPods, _ := sched.SchedulingQueue.PendingPods() + if len(pendingPods) == 0 { break } @@ -1442,7 +1468,8 @@ func TestNoRejectWhenInvalidCycle(t *testing.T) { }) } } - assert.Equal(t, 0, len(sched.SchedulingQueue.PendingPods())) + pendingPods, _ := sched.SchedulingQueue.PendingPods() + assert.Equal(t, 0, len(pendingPods)) for _, info := range scheduleOrder { klog.Infoln(info) } diff --git a/pkg/scheduler/plugins/coscheduling/plugin_service_test.go b/pkg/scheduler/plugins/coscheduling/plugin_service_test.go index 3d38c8089..cdb74f7b0 100644 --- a/pkg/scheduler/plugins/coscheduling/plugin_service_test.go +++ b/pkg/scheduler/plugins/coscheduling/plugin_service_test.go @@ -20,19 +20,19 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort" "k8s.io/kubernetes/pkg/scheduler/framework/runtime" schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" - fakepgclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned/fake" "github.com/koordinator-sh/koordinator/apis/extension" + fakepgclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" - "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta2" + "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/coscheduling/core" ) func newPluginTestSuitForGangAPI(t *testing.T, nodes []*corev1.Node) *pluginTestSuit { - var v1beta2args v1beta2.CoschedulingArgs - v1beta2.SetDefaults_CoschedulingArgs(&v1beta2args) + var v1beta3args v1beta3.CoschedulingArgs + v1beta3.SetDefaults_CoschedulingArgs(&v1beta3args) var gangSchedulingArgs config.CoschedulingArgs - err := v1beta2.Convert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(&v1beta2args, &gangSchedulingArgs, nil) + err := v1beta3.Convert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(&v1beta3args, &gangSchedulingArgs, nil) assert.NoError(t, err) pgClientSet := fakepgclientset.NewSimpleClientset() @@ -47,6 +47,7 @@ func newPluginTestSuitForGangAPI(t *testing.T, nodes []*corev1.Node) *pluginTest informerFactory := informers.NewSharedInformerFactory(cs, 0) snapshot := newTestSharedLister(nil, nodes) fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", runtime.WithClientSet(cs), diff --git a/pkg/scheduler/plugins/coscheduling/util/gang_helper.go b/pkg/scheduler/plugins/coscheduling/util/gang_helper.go index de2ff58f5..c3df47f6b 100644 --- a/pkg/scheduler/plugins/coscheduling/util/gang_helper.go +++ b/pkg/scheduler/plugins/coscheduling/util/gang_helper.go @@ -27,9 +27,9 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/klog/v2" - 
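Note: scheduler.New now takes the context as its first argument and drops the stop-channel parameter, and SchedulingQueue.Run requires an explicit logger. A consolidated sketch of the construction the tests above inline; the helper and its parameters are illustrative.

```go
package example

import (
	"context"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/scheduler"
	schedconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/profile"
)

// newTestScheduler builds a scheduler with the ctx-first constructor and
// starts its queue with a contextual logger, as the updated tests do.
func newTestScheduler(ctx context.Context, cs kubernetes.Interface,
	informerFactory informers.SharedInformerFactory,
	recorderFactory profile.RecorderFactory,
	cfg *schedconfig.KubeSchedulerConfiguration) (*scheduler.Scheduler, error) {
	sched, err := scheduler.New(
		ctx,
		cs,
		informerFactory,
		nil, // no dynamic informer factory in these tests
		recorderFactory,
		scheduler.WithProfiles(cfg.Profiles...),
	)
	if err != nil {
		return nil, err
	}
	sched.SchedulingQueue.Run(klog.FromContext(ctx))
	return sched, nil
}
```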
"sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) func GetGangGroupId(s []string) string { diff --git a/pkg/scheduler/plugins/deviceshare/plugin.go b/pkg/scheduler/plugins/deviceshare/plugin.go index c388820eb..82945864c 100644 --- a/pkg/scheduler/plugins/deviceshare/plugin.go +++ b/pkg/scheduler/plugins/deviceshare/plugin.go @@ -138,12 +138,12 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, *fram return state, nil } -func (p *Plugin) EventsToRegister() []framework.ClusterEvent { +func (p *Plugin) EventsToRegister() []framework.ClusterEventWithHint { // To register a custom event, follow the naming convention at: // https://github.com/kubernetes/kubernetes/blob/e1ad9bee5bba8fbe85a6bf6201379ce8b1a611b1/pkg/scheduler/eventhandlers.go#L415-L422 gvk := fmt.Sprintf("devices.%v.%v", schedulingv1alpha1.GroupVersion.Version, schedulingv1alpha1.GroupVersion.Group) - return []framework.ClusterEvent{ - {Resource: framework.GVK(gvk), ActionType: framework.Add | framework.Update | framework.Delete}, + return []framework.ClusterEventWithHint{ + {Event: framework.ClusterEvent{Resource: framework.GVK(gvk), ActionType: framework.Add | framework.Update | framework.Delete}}, } } diff --git a/pkg/scheduler/plugins/deviceshare/plugin_test.go b/pkg/scheduler/plugins/deviceshare/plugin_test.go index f27160d42..c710259fa 100644 --- a/pkg/scheduler/plugins/deviceshare/plugin_test.go +++ b/pkg/scheduler/plugins/deviceshare/plugin_test.go @@ -50,7 +50,7 @@ import ( koordinatorinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions" koordfeatures "github.com/koordinator-sh/koordinator/pkg/features" schedulerconfig "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" - v1beta2schedulerconfig "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta2" + v1beta3schedulerconfig "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3" "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext" frameworkexttesting "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext/testing" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/reservation" @@ -99,6 +99,14 @@ func (f *testSharedLister) NodeInfos() framework.NodeInfoLister { return f } +func (f *testSharedLister) StorageInfos() framework.StorageInfoLister { + return f +} + +func (f *testSharedLister) IsPVCUsedByPods(key string) bool { + return false +} + func (f *testSharedLister) List() ([]*framework.NodeInfo, error) { return f.nodeInfos, nil } @@ -146,6 +154,7 @@ func newPluginTestSuit(t *testing.T, nodes []*corev1.Node) *pluginTestSuit { snapshot := newTestSharedLister(nil, nodes) fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", runtime.WithClientSet(cs), @@ -162,10 +171,10 @@ func newPluginTestSuit(t *testing.T, nodes []*corev1.Node) *pluginTestSuit { } func getDefaultArgs() *schedulerconfig.DeviceShareArgs { - v1beta2Args := &v1beta2schedulerconfig.DeviceShareArgs{} - v1beta2schedulerconfig.SetDefaults_DeviceShareArgs(v1beta2Args) + v1beta3Args := &v1beta3schedulerconfig.DeviceShareArgs{} + v1beta3schedulerconfig.SetDefaults_DeviceShareArgs(v1beta3Args) args := &schedulerconfig.DeviceShareArgs{} - _ = v1beta2schedulerconfig.Convert_v1beta2_DeviceShareArgs_To_config_DeviceShareArgs(v1beta2Args, args, nil) + _ = 
v1beta3schedulerconfig.Convert_v1beta3_DeviceShareArgs_To_config_DeviceShareArgs(v1beta3Args, args, nil) return args } @@ -187,6 +196,7 @@ func Test_New(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(cs, 0) snapshot := newTestSharedLister(nil, nil) fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", runtime.WithClientSet(cs), @@ -300,7 +310,8 @@ func Test_Plugin_PreFilterExtensions(t *testing.T) { } nd.updateCacheUsed(allocations, allocatedPod, true) - status = pl.PreFilterExtensions().RemovePod(context.TODO(), cycleState, pod, framework.NewPodInfo(allocatedPod), nodeInfo) + podInfo, _ := framework.NewPodInfo(allocatedPod) + status = pl.PreFilterExtensions().RemovePod(context.TODO(), cycleState, pod, podInfo, nodeInfo) assert.True(t, status.IsSuccess()) expectPreemptible := map[string]map[schedulingv1alpha1.DeviceType]deviceResources{ @@ -318,7 +329,8 @@ func Test_Plugin_PreFilterExtensions(t *testing.T) { assert.True(t, status.IsSuccess()) assert.Equal(t, expectPreemptible, state.preemptibleDevices) - status = pl.PreFilterExtensions().AddPod(context.TODO(), cycleState, pod, framework.NewPodInfo(allocatedPod), nodeInfo) + podInfo, _ = framework.NewPodInfo(allocatedPod) + status = pl.PreFilterExtensions().AddPod(context.TODO(), cycleState, pod, podInfo, nodeInfo) assert.True(t, status.IsSuccess()) expectPreemptible = map[string]map[schedulingv1alpha1.DeviceType]deviceResources{ node.Name: {}, @@ -424,7 +436,8 @@ func Test_Plugin_PreFilterExtensionsWithReservation(t *testing.T) { nd.updateCacheUsed(allocations, allocatedPod, true) reservationCache.rInfo.AddAssignedPod(allocatedPod) - status = pl.PreFilterExtensions().RemovePod(context.TODO(), cycleState, pod, framework.NewPodInfo(allocatedPod), nodeInfo) + podInfo, _ := framework.NewPodInfo(allocatedPod) + status = pl.PreFilterExtensions().RemovePod(context.TODO(), cycleState, pod, podInfo, nodeInfo) assert.True(t, status.IsSuccess()) expectPreemptible := map[string]map[types.UID]map[schedulingv1alpha1.DeviceType]deviceResources{ @@ -444,7 +457,8 @@ func Test_Plugin_PreFilterExtensionsWithReservation(t *testing.T) { assert.True(t, status.IsSuccess()) assert.Equal(t, expectPreemptible, state.preemptibleInRRs) - status = pl.PreFilterExtensions().AddPod(context.TODO(), cycleState, pod, framework.NewPodInfo(allocatedPod), nodeInfo) + podInfo, _ = framework.NewPodInfo(allocatedPod) + status = pl.PreFilterExtensions().AddPod(context.TODO(), cycleState, pod, podInfo, nodeInfo) assert.True(t, status.IsSuccess()) expectPreemptible = map[string]map[types.UID]map[schedulingv1alpha1.DeviceType]deviceResources{ "test-node-1": { @@ -454,7 +468,8 @@ func Test_Plugin_PreFilterExtensionsWithReservation(t *testing.T) { assert.Equal(t, expectPreemptible, state.preemptibleInRRs) state.preemptibleInRRs = map[string]map[types.UID]map[schedulingv1alpha1.DeviceType]deviceResources{} - status = pl.PreFilterExtensions().AddPod(context.TODO(), cycleState, pod, framework.NewPodInfo(allocatedPod), nodeInfo) + podInfo, _ = framework.NewPodInfo(allocatedPod) + status = pl.PreFilterExtensions().AddPod(context.TODO(), cycleState, pod, podInfo, nodeInfo) assert.True(t, status.IsSuccess()) expectPreemptible = map[string]map[types.UID]map[schedulingv1alpha1.DeviceType]deviceResources{ "test-node-1": { diff --git a/pkg/scheduler/plugins/deviceshare/utils.go b/pkg/scheduler/plugins/deviceshare/utils.go index c53e073ce..6d53359ea 100644 --- a/pkg/scheduler/plugins/deviceshare/utils.go +++ 
b/pkg/scheduler/plugins/deviceshare/utils.go @@ -230,7 +230,7 @@ func preparePod(pod *corev1.Pod) (state *preFilterState, status *framework.Statu } func GetPodDeviceRequests(pod *corev1.Pod) (map[schedulingv1alpha1.DeviceType]corev1.ResourceList, error) { - podRequests, _ := resourceapi.PodRequestsAndLimits(pod) + podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) podRequests = quotav1.RemoveZeros(podRequests) var requests map[schedulingv1alpha1.DeviceType]corev1.ResourceList diff --git a/pkg/scheduler/plugins/elasticquota/controller.go b/pkg/scheduler/plugins/elasticquota/controller.go index d30b9f765..68f1b1781 100644 --- a/pkg/scheduler/plugins/elasticquota/controller.go +++ b/pkg/scheduler/plugins/elasticquota/controller.go @@ -31,10 +31,10 @@ import ( "k8s.io/apimachinery/pkg/util/wait" quotav1 "k8s.io/apiserver/pkg/quota/v1" "k8s.io/klog/v2" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - "sigs.k8s.io/scheduler-plugins/pkg/util" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/util" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/elasticquota/core" koordutil "github.com/koordinator-sh/koordinator/pkg/util" ) diff --git a/pkg/scheduler/plugins/elasticquota/controller_test.go b/pkg/scheduler/plugins/elasticquota/controller_test.go index d63b1690c..d90540e31 100644 --- a/pkg/scheduler/plugins/elasticquota/controller_test.go +++ b/pkg/scheduler/plugins/elasticquota/controller_test.go @@ -32,9 +32,9 @@ import ( "k8s.io/apimachinery/pkg/types" quotav1 "k8s.io/apiserver/pkg/quota/v1" testing2 "k8s.io/kubernetes/pkg/scheduler/testing" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/elasticquota/core" ) @@ -415,42 +415,44 @@ loopChan: } } - var dtoMetrics []dto.Metric + var dtoMetrics []*dto.Metric for _, v := range ms { m := dto.Metric{} assert.NoError(t, v.Write(&m)) - dtoMetrics = append(dtoMetrics, m) + dtoMetrics = append(dtoMetrics, &m) } sort.Slice(dtoMetrics, func(i, j int) bool { return dtoMetrics[i].String() < dtoMetrics[j].String() }) expect := []string{ - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: 
label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, - `label: label: label: label: label: label: gauge: `, + `{"label":[{"name":"field","value":"allocated"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":7000}}`, + `{"label":[{"name":"field","value":"allocated"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":50}}`, + `{"label":[{"name":"field","value":"child-request"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":6000}}`, + `{"label":[{"name":"field","value":"child-request"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":50}}`, + `{"label":[{"name":"field","value":"guaranteed"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":8000}}`, + `{"label":[{"name":"field","value":"guaranteed"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":50}}`, + `{"label":[{"name":"field","value":"max"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":4000}}`, + `{"label":[{"name":"field","value":"max"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":200}}`, + `{"label":[{"name":"field","value":"min"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":2000}}`, + `{"label":[{"name":"field","value":"min"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":100}}`, + `{"label":[{"name":"field","value":"non-preemptible-request"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":3000}}`, + `{"label":[{"name":"field","value":"non-preemptible-request"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":50}}`, + `{"label":[{"name":"field","value":"non-preemptible-used"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":2000}}`, + 
`{"label":[{"name":"field","value":"non-preemptible-used"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":50}}`, + `{"label":[{"name":"field","value":"request"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":4000}}`, + `{"label":[{"name":"field","value":"request"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":50}}`, + `{"label":[{"name":"field","value":"runtime"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":5000}}`, + `{"label":[{"name":"field","value":"runtime"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":50}}`, + `{"label":[{"name":"field","value":"unschedulable-resource"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":4000}}`, + `{"label":[{"name":"field","value":"unschedulable-resource"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":8}}`, + `{"label":[{"name":"field","value":"used"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"cpu"},{"name":"tree","value":"tree-1"}],"gauge":{"value":1000}}`, + `{"label":[{"name":"field","value":"used"},{"name":"is_parent","value":"true"},{"name":"name","value":"test-eq"},{"name":"parent","value":"root"},{"name":"resource","value":"memory"},{"name":"tree","value":"tree-1"}],"gauge":{"value":50}}`, } assert.Equal(t, len(expect), len(dtoMetrics)) - for i, v := range dtoMetrics { - assert.Equal(t, expect[i], v.String()) + for i := range dtoMetrics { + v := dtoMetrics[i] + jsonStrBytes, _ := json.Marshal(v) + assert.Equal(t, expect[i], string(jsonStrBytes)) } } diff --git a/pkg/scheduler/plugins/elasticquota/core/group_quota_manager.go b/pkg/scheduler/plugins/elasticquota/core/group_quota_manager.go index 6260abf54..b8eb2f688 100644 --- a/pkg/scheduler/plugins/elasticquota/core/group_quota_manager.go +++ b/pkg/scheduler/plugins/elasticquota/core/group_quota_manager.go @@ -25,9 +25,9 @@ import ( quotav1 "k8s.io/apiserver/pkg/quota/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/features" "github.com/koordinator-sh/koordinator/pkg/util" ) @@ -596,14 +596,14 @@ func (gqm *GroupQuotaManager) GetAllQuotaNames() map[string]struct{} { func (gqm *GroupQuotaManager) updatePodRequestNoLock(quotaName string, oldPod, newPod *v1.Pod) { var oldPodReq, newPodReq, oldNonPreemptibleRequest, newNonPreemptibleRequest 
v1.ResourceList if oldPod != nil { - oldPodReq, _ = PodRequestsAndLimits(oldPod) + oldPodReq = PodRequestsAndLimits(oldPod) if extension.IsPodNonPreemptible(oldPod) { oldNonPreemptibleRequest = oldPodReq } } if newPod != nil { - newPodReq, _ = PodRequestsAndLimits(newPod) + newPodReq = PodRequestsAndLimits(newPod) if extension.IsPodNonPreemptible(newPod) { newNonPreemptibleRequest = newPodReq } @@ -630,14 +630,14 @@ func (gqm *GroupQuotaManager) updatePodUsedNoLock(quotaName string, oldPod, newP var oldPodUsed, newPodUsed, oldNonPreemptibleUsed, newNonPreemptibleUsed v1.ResourceList if oldPod != nil { - oldPodUsed, _ = PodRequestsAndLimits(oldPod) + oldPodUsed = PodRequestsAndLimits(oldPod) if extension.IsPodNonPreemptible(oldPod) { oldNonPreemptibleUsed = oldPodUsed } } if newPod != nil { - newPodUsed, _ = PodRequestsAndLimits(newPod) + newPodUsed = PodRequestsAndLimits(newPod) if extension.IsPodNonPreemptible(newPod) { newNonPreemptibleUsed = newPodUsed } diff --git a/pkg/scheduler/plugins/elasticquota/core/group_quota_manager_test.go b/pkg/scheduler/plugins/elasticquota/core/group_quota_manager_test.go index a9cfe7fbc..acaaa5929 100644 --- a/pkg/scheduler/plugins/elasticquota/core/group_quota_manager_test.go +++ b/pkg/scheduler/plugins/elasticquota/core/group_quota_manager_test.go @@ -31,9 +31,9 @@ import ( k8sfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" schetesting "k8s.io/kubernetes/pkg/scheduler/testing" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/features" utilfeature "github.com/koordinator-sh/koordinator/pkg/util/feature" ) diff --git a/pkg/scheduler/plugins/elasticquota/core/helper.go b/pkg/scheduler/plugins/elasticquota/core/helper.go index 99d27d69d..0d5025db9 100644 --- a/pkg/scheduler/plugins/elasticquota/core/helper.go +++ b/pkg/scheduler/plugins/elasticquota/core/helper.go @@ -24,9 +24,11 @@ import ( "github.com/koordinator-sh/koordinator/pkg/features" ) -func PodRequestsAndLimits(pod *corev1.Pod) (reqs, limits corev1.ResourceList) { +func PodRequestsAndLimits(pod *corev1.Pod) (reqs corev1.ResourceList) { if k8sfeature.DefaultFeatureGate.Enabled(features.ElasticQuotaIgnorePodOverhead) { - return apiresource.PodRequestsAndLimitsWithoutOverhead(pod) + return apiresource.PodRequests(pod, apiresource.PodResourcesOptions{ + ExcludeOverhead: true, + }) } - return apiresource.PodRequestsAndLimits(pod) + return apiresource.PodRequests(pod, apiresource.PodResourcesOptions{}) } diff --git a/pkg/scheduler/plugins/elasticquota/core/helper_test.go b/pkg/scheduler/plugins/elasticquota/core/helper_test.go index 7dbaefecd..1ca4ddb3a 100644 --- a/pkg/scheduler/plugins/elasticquota/core/helper_test.go +++ b/pkg/scheduler/plugins/elasticquota/core/helper_test.go @@ -30,11 +30,10 @@ import ( func TestPodRequestsAndLimits(t *testing.T) { tests := []struct { - name string - overhead corev1.ResourceList - ignore bool - wantReqs corev1.ResourceList - wantLimits corev1.ResourceList + name string + overhead corev1.ResourceList + ignore bool + wantReqs corev1.ResourceList }{ { name: "ElasticQuotaIgnorePodOverhead=false", @@ -47,10 +46,6 @@ func TestPodRequestsAndLimits(t *testing.T) { corev1.ResourceCPU: *resource.NewMilliQuantity(5000, resource.DecimalSI), corev1.ResourceMemory: *resource.NewQuantity(9*1024*1024*1024, resource.BinarySI), }, - wantLimits: 
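Note: the removed PodRequestsAndLimits and PodRequestsAndLimitsWithoutOverhead helpers are replaced by resourceapi.PodRequests with PodResourcesOptions, both in the elasticquota helper above and in the deviceshare plugin earlier. A stand-alone sketch, assuming the k8s.io/kubernetes/pkg/api/v1/resource package behind the existing aliases; the wrapper name is illustrative.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	resourceapi "k8s.io/kubernetes/pkg/api/v1/resource"
)

// aggregatedPodRequests sums the pod's effective resource requests;
// ExcludeOverhead reproduces the old *WithoutOverhead behaviour guarded
// by the ElasticQuotaIgnorePodOverhead feature gate.
func aggregatedPodRequests(pod *corev1.Pod, ignoreOverhead bool) corev1.ResourceList {
	return resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{
		ExcludeOverhead: ignoreOverhead,
	})
}
```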
corev1.ResourceList{ - corev1.ResourceCPU: *resource.NewMilliQuantity(9000, resource.DecimalSI), - corev1.ResourceMemory: *resource.NewQuantity(17*1024*1024*1024, resource.BinarySI), - }, }, { name: "ElasticQuotaIgnorePodOverhead=true", @@ -63,10 +58,6 @@ func TestPodRequestsAndLimits(t *testing.T) { corev1.ResourceCPU: *resource.NewMilliQuantity(4000, resource.DecimalSI), corev1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), }, - wantLimits: corev1.ResourceList{ - corev1.ResourceCPU: *resource.NewMilliQuantity(8000, resource.DecimalSI), - corev1.ResourceMemory: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI), - }, }, } for _, tt := range tests { @@ -91,9 +82,8 @@ func TestPodRequestsAndLimits(t *testing.T) { Overhead: tt.overhead, }, } - reqs, limits := PodRequestsAndLimits(pod) + reqs := PodRequestsAndLimits(pod) assert.Equal(t, tt.wantReqs, reqs) - assert.Equal(t, tt.wantLimits, limits) }) } diff --git a/pkg/scheduler/plugins/elasticquota/core/quota_info.go b/pkg/scheduler/plugins/elasticquota/core/quota_info.go index e9d665ac9..d04264a48 100644 --- a/pkg/scheduler/plugins/elasticquota/core/quota_info.go +++ b/pkg/scheduler/plugins/elasticquota/core/quota_info.go @@ -24,9 +24,9 @@ import ( quotav1 "k8s.io/apiserver/pkg/quota/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/features" ) @@ -573,7 +573,7 @@ type PodInfo struct { } func NewPodInfo(pod *v1.Pod) *PodInfo { - res, _ := PodRequestsAndLimits(pod) + res := PodRequestsAndLimits(pod) return &PodInfo{ pod: pod, resource: res, diff --git a/pkg/scheduler/plugins/elasticquota/core/runtime_quota_calculator_test.go b/pkg/scheduler/plugins/elasticquota/core/runtime_quota_calculator_test.go index 0e35e9f30..356c4ff8d 100644 --- a/pkg/scheduler/plugins/elasticquota/core/runtime_quota_calculator_test.go +++ b/pkg/scheduler/plugins/elasticquota/core/runtime_quota_calculator_test.go @@ -26,9 +26,9 @@ import ( "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v12 "k8s.io/apiserver/pkg/quota/v1" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) func TestQuotaInfo_GetLimitRequest(t *testing.T) { diff --git a/pkg/scheduler/plugins/elasticquota/plugin.go b/pkg/scheduler/plugins/elasticquota/plugin.go index 3e84ada17..2acfa226f 100644 --- a/pkg/scheduler/plugins/elasticquota/plugin.go +++ b/pkg/scheduler/plugins/elasticquota/plugin.go @@ -33,13 +33,13 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework/preemption" "k8s.io/kubernetes/pkg/scheduler/metrics" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling" - apiv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned" - "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions" - "sigs.k8s.io/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling" + apiv1alpha1 
"github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/listers/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/validation" "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext" @@ -197,13 +197,13 @@ func (g *Plugin) Name() string { return Name } -func (g *Plugin) EventsToRegister() []framework.ClusterEvent { +func (g *Plugin) EventsToRegister() []framework.ClusterEventWithHint { // To register a custom event, follow the naming convention at: // https://git.k8s.io/kubernetes/pkg/scheduler/eventhandlers.go#L403-L410 eqGVK := fmt.Sprintf("elasticquotas.v1alpha1.%v", scheduling.GroupName) - return []framework.ClusterEvent{ - {Resource: framework.Pod, ActionType: framework.Delete}, - {Resource: framework.GVK(eqGVK), ActionType: framework.All}, + return []framework.ClusterEventWithHint{ + {Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: framework.Delete}}, + {Event: framework.ClusterEvent{Resource: framework.GVK(eqGVK), ActionType: framework.All}}, } } @@ -227,8 +227,7 @@ func (g *Plugin) PreFilter(ctx context.Context, cycleState *framework.CycleState } state := g.snapshotPostFilterState(quotaInfo, cycleState) - podRequest, _ := core.PodRequestsAndLimits(pod) - + podRequest := core.PodRequestsAndLimits(pod) used := quotav1.Mask(quotav1.Add(podRequest, state.used), quotav1.ResourceNames(podRequest)) if isLessEqual, exceedDimensions := quotav1.LessThanOrEqual(used, state.usedLimit); !isLessEqual { return nil, framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient quotas, "+ @@ -272,7 +271,7 @@ func (g *Plugin) AddPod(ctx context.Context, state *framework.CycleState, podToS } if postFilterState.quotaInfo.IsPodExist(podInfoToAdd.Pod) { - podReq, _ := core.PodRequestsAndLimits(podInfoToAdd.Pod) + podReq := core.PodRequestsAndLimits(podInfoToAdd.Pod) postFilterState.used = quotav1.Add(postFilterState.used, podReq) } return framework.NewStatus(framework.Success, "") @@ -292,7 +291,7 @@ func (g *Plugin) RemovePod(ctx context.Context, state *framework.CycleState, pod } if postFilterState.quotaInfo.IsPodExist(podInfoToRemove.Pod) { - podReq, _ := core.PodRequestsAndLimits(podInfoToRemove.Pod) + podReq := core.PodRequestsAndLimits(podInfoToRemove.Pod) postFilterState.used = quotav1.SubtractWithNonNegativeResult(postFilterState.used, podReq) } return framework.NewStatus(framework.Success, "") diff --git a/pkg/scheduler/plugins/elasticquota/plugin_helper.go b/pkg/scheduler/plugins/elasticquota/plugin_helper.go index 5eea22c74..aa8a83bca 100644 --- a/pkg/scheduler/plugins/elasticquota/plugin_helper.go +++ b/pkg/scheduler/plugins/elasticquota/plugin_helper.go @@ -29,9 +29,9 @@ import ( k8sfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/framework" - schedulerv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + schedulerv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/features" 
"github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/elasticquota/core" ) diff --git a/pkg/scheduler/plugins/elasticquota/plugin_helper_test.go b/pkg/scheduler/plugins/elasticquota/plugin_helper_test.go index b28e64a71..23ce3e100 100644 --- a/pkg/scheduler/plugins/elasticquota/plugin_helper_test.go +++ b/pkg/scheduler/plugins/elasticquota/plugin_helper_test.go @@ -24,9 +24,9 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + schedulerv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/elasticquota/core" ) diff --git a/pkg/scheduler/plugins/elasticquota/plugin_test.go b/pkg/scheduler/plugins/elasticquota/plugin_test.go index 40ddb830b..48a7a485d 100644 --- a/pkg/scheduler/plugins/elasticquota/plugin_test.go +++ b/pkg/scheduler/plugins/elasticquota/plugin_test.go @@ -50,15 +50,15 @@ import ( schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" imageutils "k8s.io/kubernetes/test/utils/image" "k8s.io/utils/pointer" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - pgclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned" - pgfake "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned/fake" "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + pgclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" + pgfake "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake" "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned/fake" koordinatorinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" - "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta2" + "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3" "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/elasticquota/core" ) @@ -115,10 +115,10 @@ var ( func newPluginTestSuit(t *testing.T, nodes []*corev1.Node) *pluginTestSuit { setLoglevel("5") - var v1beta2args v1beta2.ElasticQuotaArgs - v1beta2.SetDefaults_ElasticQuotaArgs(&v1beta2args) + var v1beta3args v1beta3.ElasticQuotaArgs + v1beta3.SetDefaults_ElasticQuotaArgs(&v1beta3args) var elasticQuotaArgs config.ElasticQuotaArgs - err := v1beta2.Convert_v1beta2_ElasticQuotaArgs_To_config_ElasticQuotaArgs(&v1beta2args, &elasticQuotaArgs, nil) + err := v1beta3.Convert_v1beta3_ElasticQuotaArgs_To_config_ElasticQuotaArgs(&v1beta3args, &elasticQuotaArgs, nil) assert.NoError(t, err) elasticQuotaPluginConfig := schedulerconfig.PluginConfig{ @@ -175,6 +175,7 @@ func newPluginTestSuit(t *testing.T, nodes []*corev1.Node) *pluginTestSuit { flag.Parse() } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", runtime.WithClientSet(cs), @@ -194,10 +195,10 @@ func newPluginTestSuit(t *testing.T, nodes []*corev1.Node) *pluginTestSuit { func newPluginTestSuitWithPod(t *testing.T, 
nodes []*corev1.Node, pods []*corev1.Pod) *pluginTestSuit { setLoglevel("5") - var v1beta2args v1beta2.ElasticQuotaArgs - v1beta2.SetDefaults_ElasticQuotaArgs(&v1beta2args) + var v1beta3args v1beta3.ElasticQuotaArgs + v1beta3.SetDefaults_ElasticQuotaArgs(&v1beta3args) var elasticQuotaArgs config.ElasticQuotaArgs - err := v1beta2.Convert_v1beta2_ElasticQuotaArgs_To_config_ElasticQuotaArgs(&v1beta2args, &elasticQuotaArgs, nil) + err := v1beta3.Convert_v1beta3_ElasticQuotaArgs_To_config_ElasticQuotaArgs(&v1beta3args, &elasticQuotaArgs, nil) assert.NoError(t, err) elasticQuotaPluginConfig := schedulerconfig.PluginConfig{ @@ -256,6 +257,7 @@ func newPluginTestSuitWithPod(t *testing.T, nodes []*corev1.Node, pods []*corev1 flag.Parse() } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", runtime.WithClientSet(cs), @@ -283,6 +285,14 @@ type testSharedLister struct { nodeInfoMap map[string]*framework.NodeInfo } +func (f *testSharedLister) StorageInfos() framework.StorageInfoLister { + return f +} + +func (f *testSharedLister) IsPVCUsedByPods(key string) bool { + return false +} + func (f *testSharedLister) NodeInfos() framework.NodeInfoLister { return f } @@ -759,7 +769,7 @@ func TestPlugin_PreFilter_CheckParent(t *testing.T) { qi1.Lock() qi1.CalculateInfo.Runtime = tt.parentRuntime.DeepCopy() qi1.UnLock() - podRequests, _ := core.PodRequestsAndLimits(tt.pod) + podRequests := core.PodRequestsAndLimits(tt.pod) status := *gp.checkQuotaRecursive(tt.quotaInfo.Name, []string{tt.quotaInfo.Name}, podRequests) assert.Equal(t, tt.expectedStatus, status) }) @@ -1134,7 +1144,7 @@ func (npm *nominatedPodMap) delete(p *corev1.Pod) { } // UpdateNominatedPod updates the with . -func (npm *nominatedPodMap) UpdateNominatedPod(oldPod *corev1.Pod, newPodInfo *framework.PodInfo) { +func (npm *nominatedPodMap) UpdateNominatedPod(logr klog.Logger, oldPod *corev1.Pod, newPodInfo *framework.PodInfo) { npm.Lock() defer npm.Unlock() // In some cases, an Update event with no "NominatedNode" present is received right @@ -1181,7 +1191,7 @@ func (npm *nominatedPodMap) DeleteNominatedPodIfExists(pod *corev1.Pod) { // This is called during the preemption process after a node is nominated to run // the pod. We update the structure before sending a request to update the pod // object to avoid races with the following scheduling cycles. 
-func (npm *nominatedPodMap) AddNominatedPod(pi *framework.PodInfo, nominatingInfo *framework.NominatingInfo) { +func (npm *nominatedPodMap) AddNominatedPod(logger klog.Logger, pi *framework.PodInfo, nominatingInfo *framework.NominatingInfo) { npm.Lock() npm.add(pi, nominatingInfo.NominatedNodeName) npm.Unlock() diff --git a/pkg/scheduler/plugins/elasticquota/preempt.go b/pkg/scheduler/plugins/elasticquota/preempt.go index 9be5c39b9..f689740c8 100644 --- a/pkg/scheduler/plugins/elasticquota/preempt.go +++ b/pkg/scheduler/plugins/elasticquota/preempt.go @@ -31,12 +31,12 @@ import ( corev1helpers "k8s.io/component-helpers/scheduling/corev1" "k8s.io/klog/v2" extenderv1 "k8s.io/kube-scheduler/extender/v1" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework/preemption" "k8s.io/kubernetes/pkg/scheduler/util" "github.com/koordinator-sh/koordinator/apis/extension" + koordfeature "github.com/koordinator-sh/koordinator/pkg/features" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/elasticquota/core" ) @@ -170,7 +170,7 @@ func (g *Plugin) SelectVictimsOnNode( violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims, pdbs) postFilterState, _ := getPostFilterState(state) - podReq, _ := core.PodRequestsAndLimits(pod) + podReq := core.PodRequestsAndLimits(pod) reprievePod := func(pi *framework.PodInfo) (bool, error) { if err := addPod(pi); err != nil { @@ -268,7 +268,7 @@ func filterPodsWithPDBViolation(podInfos []*framework.PodInfo, pdbs []*policy.Po // TODO if the kubernetes version is before 1.20, will return nil. func getPDBLister(handle framework.Handle) policylisters.PodDisruptionBudgetLister { - if !feature.DefaultFeatureGate.Enabled(features.PodDisruptionBudget) { + if !feature.DefaultFeatureGate.Enabled(koordfeature.PodDisruptionBudget) { return nil } diff --git a/pkg/scheduler/plugins/elasticquota/quota_handler.go b/pkg/scheduler/plugins/elasticquota/quota_handler.go index f2617d148..c4d681a81 100644 --- a/pkg/scheduler/plugins/elasticquota/quota_handler.go +++ b/pkg/scheduler/plugins/elasticquota/quota_handler.go @@ -23,9 +23,9 @@ import ( quotav1 "k8s.io/apiserver/pkg/quota/v1" k8sfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" - schedulerv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + schedulerv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" koordfeatures "github.com/koordinator-sh/koordinator/pkg/features" "github.com/koordinator-sh/koordinator/pkg/scheduler/plugins/elasticquota/core" ) diff --git a/pkg/scheduler/plugins/elasticquota/quota_overuse_revoke.go b/pkg/scheduler/plugins/elasticquota/quota_overuse_revoke.go index 8c26601f3..073c533ab 100644 --- a/pkg/scheduler/plugins/elasticquota/quota_overuse_revoke.go +++ b/pkg/scheduler/plugins/elasticquota/quota_overuse_revoke.go @@ -114,7 +114,7 @@ func (monitor *QuotaOverUsedGroupMonitor) getToRevokePodList(quotaName string) [ if extension.IsPodNonPreemptible(pod) { continue } - podReq, _ := core.PodRequestsAndLimits(pod) + podReq := core.PodRequestsAndLimits(pod) used = quotav1.Mask(quotav1.Subtract(used, podReq), quotav1.ResourceNames(podReq)) tryAssignBackPodCache = append(tryAssignBackPodCache, pod) } @@ -132,7 +132,7 @@ func (monitor *QuotaOverUsedGroupMonitor) getToRevokePodList(quotaName string) [ realRevokePodCache := make([]*v1.Pod, 0) for index := 
len(tryAssignBackPodCache) - 1; index >= 0; index-- { pod := tryAssignBackPodCache[index] - podRequest, _ := core.PodRequestsAndLimits(pod) + podRequest := core.PodRequestsAndLimits(pod) used = quotav1.Mask(quotav1.Add(used, podRequest), quotav1.ResourceNames(podRequest)) if canAssignBack, _ := quotav1.LessThanOrEqual(used, runtime); !canAssignBack { used = quotav1.Subtract(used, podRequest) diff --git a/pkg/scheduler/plugins/loadaware/estimator/default_estimator.go b/pkg/scheduler/plugins/loadaware/estimator/default_estimator.go index 8b322bdbb..748cb6de7 100644 --- a/pkg/scheduler/plugins/loadaware/estimator/default_estimator.go +++ b/pkg/scheduler/plugins/loadaware/estimator/default_estimator.go @@ -59,7 +59,7 @@ func (e *DefaultEstimator) EstimatePod(pod *corev1.Pod) (map[corev1.ResourceName } func estimatedPodUsed(pod *corev1.Pod, resourceWeights map[corev1.ResourceName]int64, scalingFactors map[corev1.ResourceName]int64) map[corev1.ResourceName]int64 { - requests, limits := resourceapi.PodRequestsAndLimits(pod) + requests, limits := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}), resourceapi.PodLimits(pod, resourceapi.PodResourcesOptions{}) estimatedUsed := make(map[corev1.ResourceName]int64) priorityClass := extension.GetPodPriorityClassWithDefault(pod) for resourceName := range resourceWeights { diff --git a/pkg/scheduler/plugins/loadaware/estimator/default_estimator_test.go b/pkg/scheduler/plugins/loadaware/estimator/default_estimator_test.go index 1f6cbaaf5..eda48b5a4 100644 --- a/pkg/scheduler/plugins/loadaware/estimator/default_estimator_test.go +++ b/pkg/scheduler/plugins/loadaware/estimator/default_estimator_test.go @@ -27,7 +27,7 @@ import ( "github.com/koordinator-sh/koordinator/apis/extension" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" - "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta2" + "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3" ) func TestDefaultEstimatorEstimatePod(t *testing.T) { @@ -232,11 +232,11 @@ func TestDefaultEstimatorEstimatePod(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var v1beta2args v1beta2.LoadAwareSchedulingArgs - v1beta2args.EstimatedScalingFactors = tt.scalarFactors - v1beta2.SetDefaults_LoadAwareSchedulingArgs(&v1beta2args) + var v1beta3args v1beta3.LoadAwareSchedulingArgs + v1beta3args.EstimatedScalingFactors = tt.scalarFactors + v1beta3.SetDefaults_LoadAwareSchedulingArgs(&v1beta3args) var loadAwareSchedulingArgs config.LoadAwareSchedulingArgs - err := v1beta2.Convert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta2args, &loadAwareSchedulingArgs, nil) + err := v1beta3.Convert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta3args, &loadAwareSchedulingArgs, nil) assert.NoError(t, err) estimator, err := NewDefaultEstimator(&loadAwareSchedulingArgs, nil) assert.NoError(t, err) @@ -313,10 +313,10 @@ func TestDefaultEstimatorEstimateNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var v1beta2args v1beta2.LoadAwareSchedulingArgs - v1beta2.SetDefaults_LoadAwareSchedulingArgs(&v1beta2args) + var v1beta3args v1beta3.LoadAwareSchedulingArgs + v1beta3.SetDefaults_LoadAwareSchedulingArgs(&v1beta3args) var loadAwareSchedulingArgs config.LoadAwareSchedulingArgs - err := v1beta2.Convert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta2args, &loadAwareSchedulingArgs, nil) + err := 
v1beta3.Convert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta3args, &loadAwareSchedulingArgs, nil) assert.NoError(t, err) estimator, err := NewDefaultEstimator(&loadAwareSchedulingArgs, nil) assert.NoError(t, err) diff --git a/pkg/scheduler/plugins/loadaware/load_aware.go b/pkg/scheduler/plugins/loadaware/load_aware.go index 39b292815..5baa15102 100644 --- a/pkg/scheduler/plugins/loadaware/load_aware.go +++ b/pkg/scheduler/plugins/loadaware/load_aware.go @@ -111,12 +111,12 @@ func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) func (p *Plugin) Name() string { return Name } -func (p *Plugin) EventsToRegister() []framework.ClusterEvent { +func (p *Plugin) EventsToRegister() []framework.ClusterEventWithHint { // To register a custom event, follow the naming convention at: // https://github.com/kubernetes/kubernetes/blob/e1ad9bee5bba8fbe85a6bf6201379ce8b1a611b1/pkg/scheduler/eventhandlers.go#L415-L422 gvk := fmt.Sprintf("nodemetrics.%v.%v", slov1alpha1.GroupVersion.Version, slov1alpha1.GroupVersion.Group) - return []framework.ClusterEvent{ - {Resource: framework.GVK(gvk), ActionType: framework.Add | framework.Update | framework.Delete}, + return []framework.ClusterEventWithHint{ + {Event: framework.ClusterEvent{Resource: framework.GVK(gvk), ActionType: framework.Add | framework.Update | framework.Delete}}, } } diff --git a/pkg/scheduler/plugins/loadaware/load_aware_test.go b/pkg/scheduler/plugins/loadaware/load_aware_test.go index 5e8d6f364..31daad858 100644 --- a/pkg/scheduler/plugins/loadaware/load_aware_test.go +++ b/pkg/scheduler/plugins/loadaware/load_aware_test.go @@ -44,7 +44,7 @@ import ( koordfake "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned/fake" koordinatorinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" - "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta2" + "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3" "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext" ) @@ -84,6 +84,14 @@ func newTestSharedLister(pods []*corev1.Pod, nodes []*corev1.Node) *testSharedLi } } +func (f *testSharedLister) StorageInfos() framework.StorageInfoLister { + return f +} + +func (f *testSharedLister) IsPVCUsedByPods(key string) bool { + return false +} + func (f *testSharedLister) NodeInfos() framework.NodeInfoLister { return f } @@ -105,10 +113,10 @@ func (f *testSharedLister) Get(nodeName string) (*framework.NodeInfo, error) { } func TestNew(t *testing.T) { - var v1beta2args v1beta2.LoadAwareSchedulingArgs - v1beta2.SetDefaults_LoadAwareSchedulingArgs(&v1beta2args) + var v1beta3args v1beta3.LoadAwareSchedulingArgs + v1beta3.SetDefaults_LoadAwareSchedulingArgs(&v1beta3args) var loadAwareSchedulingArgs config.LoadAwareSchedulingArgs - err := v1beta2.Convert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta2args, &loadAwareSchedulingArgs, nil) + err := v1beta3.Convert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta3args, &loadAwareSchedulingArgs, nil) assert.NoError(t, err) koordClientSet := koordfake.NewSimpleClientset() @@ -126,7 +134,7 @@ func TestNew(t *testing.T) { schedulertesting.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), schedulertesting.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), } - fh, err := schedulertesting.NewFramework(registeredPlugins, "koord-scheduler", + fh, err := 
schedulertesting.NewFramework(context.TODO(), registeredPlugins, "koord-scheduler", frameworkruntime.WithClientSet(cs), frameworkruntime.WithInformerFactory(informerFactory), frameworkruntime.WithSnapshotSharedLister(snapshot), @@ -199,10 +207,10 @@ func TestFilterExpiredNodeMetric(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var v1beta2args v1beta2.LoadAwareSchedulingArgs - v1beta2.SetDefaults_LoadAwareSchedulingArgs(&v1beta2args) + var v1beta3args v1beta3.LoadAwareSchedulingArgs + v1beta3.SetDefaults_LoadAwareSchedulingArgs(&v1beta3args) var loadAwareSchedulingArgs config.LoadAwareSchedulingArgs - err := v1beta2.Convert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta2args, &loadAwareSchedulingArgs, nil) + err := v1beta3.Convert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta3args, &loadAwareSchedulingArgs, nil) assert.NoError(t, err) koordClientSet := koordfake.NewSimpleClientset() @@ -229,7 +237,7 @@ func TestFilterExpiredNodeMetric(t *testing.T) { schedulertesting.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New), schedulertesting.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), } - fh, err := schedulertesting.NewFramework(registeredPlugins, "koord-scheduler", + fh, err := schedulertesting.NewFramework(context.TODO(), registeredPlugins, "koord-scheduler", frameworkruntime.WithClientSet(cs), frameworkruntime.WithInformerFactory(informerFactory), frameworkruntime.WithSnapshotSharedLister(snapshot), @@ -263,7 +271,7 @@ func TestFilterUsage(t *testing.T) { name string usageThresholds map[corev1.ResourceName]int64 prodUsageThresholds map[corev1.ResourceName]int64 - aggregated *v1beta2.LoadAwareSchedulingAggregatedArgs + aggregated *v1beta3.LoadAwareSchedulingAggregatedArgs customUsageThresholds map[corev1.ResourceName]int64 customProdUsageThresholds map[corev1.ResourceName]int64 customAggregatedUsage *extension.CustomAggregatedUsage @@ -337,7 +345,7 @@ func TestFilterUsage(t *testing.T) { { name: "filter exceed p95 cpu usage", nodeName: "test-node-1", - aggregated: &v1beta2.LoadAwareSchedulingAggregatedArgs{ + aggregated: &v1beta3.LoadAwareSchedulingAggregatedArgs{ UsageThresholds: map[corev1.ResourceName]int64{ corev1.ResourceCPU: 60, }, @@ -803,20 +811,20 @@ func TestFilterUsage(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var v1beta2args v1beta2.LoadAwareSchedulingArgs - v1beta2args.FilterExpiredNodeMetrics = pointer.Bool(false) + var v1beta3args v1beta3.LoadAwareSchedulingArgs + v1beta3args.FilterExpiredNodeMetrics = pointer.Bool(false) if len(tt.usageThresholds) > 0 { - v1beta2args.UsageThresholds = tt.usageThresholds + v1beta3args.UsageThresholds = tt.usageThresholds } if len(tt.prodUsageThresholds) > 0 { - v1beta2args.ProdUsageThresholds = tt.prodUsageThresholds + v1beta3args.ProdUsageThresholds = tt.prodUsageThresholds } if tt.aggregated != nil { - v1beta2args.Aggregated = tt.aggregated + v1beta3args.Aggregated = tt.aggregated } - v1beta2.SetDefaults_LoadAwareSchedulingArgs(&v1beta2args) + v1beta3.SetDefaults_LoadAwareSchedulingArgs(&v1beta3args) var loadAwareSchedulingArgs config.LoadAwareSchedulingArgs - err := v1beta2.Convert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta2args, &loadAwareSchedulingArgs, nil) + err := v1beta3.Convert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta3args, &loadAwareSchedulingArgs, nil) assert.NoError(t, err) koordClientSet := koordfake.NewSimpleClientset() 
@@ -871,7 +879,7 @@ func TestFilterUsage(t *testing.T) { } snapshot := newTestSharedLister(nil, nodes) - fh, err := schedulertesting.NewFramework(registeredPlugins, "koord-scheduler", + fh, err := schedulertesting.NewFramework(context.TODO(), registeredPlugins, "koord-scheduler", frameworkruntime.WithClientSet(cs), frameworkruntime.WithInformerFactory(informerFactory), frameworkruntime.WithSnapshotSharedLister(snapshot), @@ -918,7 +926,7 @@ func TestScore(t *testing.T) { nodeName string nodeMetric *slov1alpha1.NodeMetric scoreAccordingProdUsage bool - aggregatedArgs *v1beta2.LoadAwareSchedulingAggregatedArgs + aggregatedArgs *v1beta3.LoadAwareSchedulingAggregatedArgs wantScore int64 wantStatus *framework.Status }{ @@ -1070,7 +1078,7 @@ func TestScore(t *testing.T) { }, { name: "score load node with p95", - aggregatedArgs: &v1beta2.LoadAwareSchedulingAggregatedArgs{ + aggregatedArgs: &v1beta3.LoadAwareSchedulingAggregatedArgs{ ScoreAggregationType: extension.P95, ScoreAggregatedDuration: &metav1.Duration{Duration: 5 * time.Minute}, }, @@ -1145,7 +1153,7 @@ func TestScore(t *testing.T) { }, { name: "score load node with p95 but have not reported usage", - aggregatedArgs: &v1beta2.LoadAwareSchedulingAggregatedArgs{ + aggregatedArgs: &v1beta3.LoadAwareSchedulingAggregatedArgs{ ScoreAggregationType: extension.P95, ScoreAggregatedDuration: &metav1.Duration{Duration: 5 * time.Minute}, }, @@ -1201,7 +1209,7 @@ func TestScore(t *testing.T) { }, { name: "score load node with p95 but have not reported usage and have assigned pods", - aggregatedArgs: &v1beta2.LoadAwareSchedulingAggregatedArgs{ + aggregatedArgs: &v1beta3.LoadAwareSchedulingAggregatedArgs{ ScoreAggregationType: extension.P95, ScoreAggregatedDuration: &metav1.Duration{Duration: 5 * time.Minute}, }, @@ -1753,14 +1761,14 @@ func TestScore(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var v1beta2args v1beta2.LoadAwareSchedulingArgs - v1beta2args.ScoreAccordingProdUsage = &tt.scoreAccordingProdUsage + var v1beta3args v1beta3.LoadAwareSchedulingArgs + v1beta3args.ScoreAccordingProdUsage = &tt.scoreAccordingProdUsage if tt.aggregatedArgs != nil { - v1beta2args.Aggregated = tt.aggregatedArgs + v1beta3args.Aggregated = tt.aggregatedArgs } - v1beta2.SetDefaults_LoadAwareSchedulingArgs(&v1beta2args) + v1beta3.SetDefaults_LoadAwareSchedulingArgs(&v1beta3args) var loadAwareSchedulingArgs config.LoadAwareSchedulingArgs - err := v1beta2.Convert_v1beta2_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta2args, &loadAwareSchedulingArgs, nil) + err := v1beta3.Convert_v1beta3_LoadAwareSchedulingArgs_To_config_LoadAwareSchedulingArgs(&v1beta3args, &loadAwareSchedulingArgs, nil) assert.NoError(t, err) koordClientSet := koordfake.NewSimpleClientset() @@ -1794,6 +1802,7 @@ func TestScore(t *testing.T) { schedulertesting.RegisterQueueSortPlugin(queuesort.Name, queuesort.New), } fh, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", frameworkruntime.WithClientSet(cs), diff --git a/pkg/scheduler/plugins/loadaware/pod_assign_cache.go b/pkg/scheduler/plugins/loadaware/pod_assign_cache.go index 0f89e926b..0c2d2cb33 100644 --- a/pkg/scheduler/plugins/loadaware/pod_assign_cache.go +++ b/pkg/scheduler/plugins/loadaware/pod_assign_cache.go @@ -79,7 +79,7 @@ func (p *podAssignCache) unAssign(nodeName string, pod *corev1.Pod) { } } -func (p *podAssignCache) OnAdd(obj interface{}) { +func (p *podAssignCache) OnAdd(obj interface{}, isInInitialList bool) { pod, ok := 
obj.(*corev1.Pod) if !ok { return diff --git a/pkg/scheduler/plugins/loadaware/pod_assign_cache_test.go b/pkg/scheduler/plugins/loadaware/pod_assign_cache_test.go index 3dd26fab1..c7f7e0179 100644 --- a/pkg/scheduler/plugins/loadaware/pod_assign_cache_test.go +++ b/pkg/scheduler/plugins/loadaware/pod_assign_cache_test.go @@ -100,7 +100,7 @@ func TestPodAssignCache_OnAdd(t *testing.T) { }() timeNowFn = fakeTimeNowFn assignCache := newPodAssignCache() - assignCache.OnAdd(tt.pod) + assignCache.OnAdd(tt.pod, true) assert.Equal(t, tt.wantCache, assignCache.podInfoItems) }) } diff --git a/pkg/scheduler/plugins/nodenumaresource/plugin.go b/pkg/scheduler/plugins/nodenumaresource/plugin.go index 92f4c50fc..54d3b810a 100644 --- a/pkg/scheduler/plugins/nodenumaresource/plugin.go +++ b/pkg/scheduler/plugins/nodenumaresource/plugin.go @@ -207,12 +207,12 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, *fram return state, nil } -func (p *Plugin) EventsToRegister() []framework.ClusterEvent { +func (p *Plugin) EventsToRegister() []framework.ClusterEventWithHint { // To register a custom event, follow the naming convention at: // https://github.com/kubernetes/kubernetes/blob/e1ad9bee5bba8fbe85a6bf6201379ce8b1a611b1/pkg/scheduler/eventhandlers.go#L415-L422 gvk := fmt.Sprintf("noderesourcetopologies.%v.%v", nrtv1alpha1.SchemeGroupVersion.Version, nrtv1alpha1.SchemeGroupVersion.Group) - return []framework.ClusterEvent{ - {Resource: framework.GVK(gvk), ActionType: framework.Add | framework.Update | framework.Delete}, + return []framework.ClusterEventWithHint{ + {Event: framework.ClusterEvent{Resource: framework.GVK(gvk), ActionType: framework.Add | framework.Update | framework.Delete}}, } } @@ -222,7 +222,7 @@ func (p *Plugin) PreFilter(ctx context.Context, cycleState *framework.CycleState return nil, framework.NewStatus(framework.Error, err.Error()) } - requests, _ := resourceapi.PodRequestsAndLimits(pod) + requests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) if quotav1.IsZero(requests) { cycleState.Write(stateKey, &preFilterState{ skip: true, diff --git a/pkg/scheduler/plugins/nodenumaresource/plugin_test.go b/pkg/scheduler/plugins/nodenumaresource/plugin_test.go index 522627caf..b4672b0f9 100644 --- a/pkg/scheduler/plugins/nodenumaresource/plugin_test.go +++ b/pkg/scheduler/plugins/nodenumaresource/plugin_test.go @@ -18,6 +18,7 @@ package nodenumaresource import ( "context" + "errors" "fmt" "reflect" "testing" @@ -47,7 +48,7 @@ import ( koordinatorinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions" schedulingconfig "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" _ "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/scheme" - "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta2" + "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3" "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext" frameworkexttesting "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext/testing" "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext/topologymanager" @@ -91,6 +92,14 @@ func newTestSharedLister(pods []*corev1.Pod, nodes []*corev1.Node) *testSharedLi } } +func (f *testSharedLister) StorageInfos() framework.StorageInfoLister { + return f +} + +func (f *testSharedLister) IsPVCUsedByPods(key string) bool { + return false +} + func (f *testSharedLister) NodeInfos() framework.NodeInfoLister { return f } @@ -126,7 +135,7 @@ func 
makePodOnNode(request map[corev1.ResourceName]string, node string, isCPUSet extension.LabelPodQoS: string(extension.QoSLSR), } if node != "" { - reqs, _ := apiresource.PodRequestsAndLimits(pod) + reqs := apiresource.PodRequests(pod, apiresource.PodResourcesOptions{}) val := reqs.Cpu().MilliValue() / 1000 _ = extension.SetResourceStatus(pod, &extension.ResourceStatus{CPUSet: fmt.Sprintf("0-%d", val-1)}) } @@ -154,10 +163,10 @@ type pluginTestSuit struct { } func newPluginTestSuit(t *testing.T, pods []*corev1.Pod, nodes []*corev1.Node) *pluginTestSuit { - var v1beta2args v1beta2.NodeNUMAResourceArgs - v1beta2.SetDefaults_NodeNUMAResourceArgs(&v1beta2args) + var v1beta3args v1beta3.NodeNUMAResourceArgs + v1beta3.SetDefaults_NodeNUMAResourceArgs(&v1beta3args) var nodeNUMAResourceArgs schedulingconfig.NodeNUMAResourceArgs - err := v1beta2.Convert_v1beta2_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(&v1beta2args, &nodeNUMAResourceArgs, nil) + err := v1beta3.Convert_v1beta3_NodeNUMAResourceArgs_To_config_NodeNUMAResourceArgs(&v1beta3args, &nodeNUMAResourceArgs, nil) assert.NoError(t, err) nrtClientSet := nrtfake.NewSimpleClientset() @@ -190,6 +199,7 @@ func newPluginTestSuit(t *testing.T, pods []*corev1.Pod, nodes []*corev1.Node) * informerFactory := informers.NewSharedInformerFactory(cs, 0) snapshot := newTestSharedLister(pods, nodes) fh, err := st.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", runtime.WithClientSet(cs), @@ -996,7 +1006,7 @@ func TestFilterWithAmplifiedCPUs(t *testing.T) { } handler := &podEventHandler{resourceManager: pl.resourceManager} for _, v := range tt.existingPods { - handler.OnAdd(v) + handler.OnAdd(v, true) } cycleState := framework.NewCycleState() @@ -1099,7 +1109,7 @@ func TestPlugin_Reserve(t *testing.T) { }, cpuTopology: buildCPUTopologyForTest(2, 1, 4, 2), pod: &corev1.Pod{}, - want: framework.NewStatus(framework.Error, "not enough cpus available to satisfy request"), + want: framework.AsStatus(errors.New("not enough cpus available to satisfy request")), }, { name: "succeed with valid cpu topology and node numa least allocate strategy", diff --git a/pkg/scheduler/plugins/nodenumaresource/pod_eventhandler.go b/pkg/scheduler/plugins/nodenumaresource/pod_eventhandler.go index 4606cbb78..75f816d87 100644 --- a/pkg/scheduler/plugins/nodenumaresource/pod_eventhandler.go +++ b/pkg/scheduler/plugins/nodenumaresource/pod_eventhandler.go @@ -49,7 +49,7 @@ func registerPodEventHandler(handle framework.Handle, resourceManager ResourceMa } } -func (c *podEventHandler) OnAdd(obj interface{}) { +func (c *podEventHandler) OnAdd(obj interface{}, isInInitialList bool) { pod, ok := obj.(*corev1.Pod) if !ok { return diff --git a/pkg/scheduler/plugins/nodenumaresource/pod_eventhandler_test.go b/pkg/scheduler/plugins/nodenumaresource/pod_eventhandler_test.go index 2409b504d..5c9c285f5 100644 --- a/pkg/scheduler/plugins/nodenumaresource/pod_eventhandler_test.go +++ b/pkg/scheduler/plugins/nodenumaresource/pod_eventhandler_test.go @@ -114,7 +114,7 @@ func TestPodEventHandler(t *testing.T) { handler := &podEventHandler{ resourceManager: resourceManager, } - handler.OnAdd(tt.pod) + handler.OnAdd(tt.pod, true) handler.OnUpdate(tt.pod, tt.pod) nodeAllocation := resourceManager.getOrCreateNodeAllocation("test-node-1") diff --git a/pkg/scheduler/plugins/nodenumaresource/scoring_test.go b/pkg/scheduler/plugins/nodenumaresource/scoring_test.go index 91dd4113a..6e0d7def1 100644 --- a/pkg/scheduler/plugins/nodenumaresource/scoring_test.go +++ 
b/pkg/scheduler/plugins/nodenumaresource/scoring_test.go @@ -290,7 +290,7 @@ func TestNUMANodeScore(t *testing.T) { for _, v := range tt.existingPods { builder := cpuset.NewCPUSetBuilder() if AllowUseCPUSet(v) { - requests, _ := apiresource.PodRequestsAndLimits(v) + requests := apiresource.PodRequests(v, apiresource.PodResourcesOptions{}) cpuCount := int(requests.Cpu().MilliValue() / 1000) for i := 0; i < cpuCount; i++ { builder.Add(i) @@ -814,7 +814,7 @@ func TestScoreWithAmplifiedCPUs(t *testing.T) { handler := &podEventHandler{resourceManager: pl.resourceManager} for _, v := range tt.existingPods { - handler.OnAdd(v) + handler.OnAdd(v, true) } state := framework.NewCycleState() diff --git a/pkg/scheduler/plugins/nodenumaresource/topology_eventhandler.go b/pkg/scheduler/plugins/nodenumaresource/topology_eventhandler.go index 231265e74..663560b63 100644 --- a/pkg/scheduler/plugins/nodenumaresource/topology_eventhandler.go +++ b/pkg/scheduler/plugins/nodenumaresource/topology_eventhandler.go @@ -59,7 +59,7 @@ func initNRTInformerFactory(handle framework.Handle) (nrtinformers.SharedInforme return nodeResTopologyInformerFactory, nil } -func (m *nodeResourceTopologyEventHandler) OnAdd(obj interface{}) { +func (m *nodeResourceTopologyEventHandler) OnAdd(obj interface{}, isInInitialList bool) { nodeResTopology, ok := obj.(*nrtv1alpha1.NodeResourceTopology) if !ok { return diff --git a/pkg/scheduler/plugins/reservation/controller/controller.go b/pkg/scheduler/plugins/reservation/controller/controller.go index 666722c74..ca966c887 100644 --- a/pkg/scheduler/plugins/reservation/controller/controller.go +++ b/pkg/scheduler/plugins/reservation/controller/controller.go @@ -223,7 +223,7 @@ func (c *Controller) syncStatus(reservation *schedulingv1alpha1.Reservation) err Name: pod.Name, UID: pod.UID, }) - requests, _ := resource.PodRequestsAndLimits(pod) + requests := resource.PodRequests(pod, resource.PodResourcesOptions{}) actualAllocated = quotav1.Add(actualAllocated, requests) } diff --git a/pkg/scheduler/plugins/reservation/eventhandler.go b/pkg/scheduler/plugins/reservation/eventhandler.go index 56b2fe6d3..09f60866c 100644 --- a/pkg/scheduler/plugins/reservation/eventhandler.go +++ b/pkg/scheduler/plugins/reservation/eventhandler.go @@ -44,7 +44,7 @@ func registerReservationEventHandler(cache *reservationCache, koordinatorInforme frameworkexthelper.ForceSyncFromInformer(context.TODO().Done(), koordinatorInformerFactory, reservationInformer, eventHandler) } -func (h *reservationEventHandler) OnAdd(obj interface{}) { +func (h *reservationEventHandler) OnAdd(obj interface{}, isInInitialList bool) { r, ok := obj.(*schedulingv1alpha1.Reservation) if !ok { return @@ -67,7 +67,8 @@ func (h *reservationEventHandler) OnUpdate(oldObj, newObj interface{}) { if reservationutil.IsReservationActive(newR) || reservationutil.IsReservationFailed(newR) || reservationutil.IsReservationSucceeded(newR) { h.cache.updateReservation(newR) - h.rrNominator.DeleteReservePod(framework.NewPodInfo(reservationutil.NewReservePod(newR))) + podInfo, _ := framework.NewPodInfo(reservationutil.NewReservePod(newR)) + h.rrNominator.DeleteReservePod(podInfo) klog.V(4).InfoS("update reservation into reservationCache", "reservation", klog.KObj(newR)) } } @@ -87,8 +88,8 @@ func (h *reservationEventHandler) OnDelete(obj interface{}) { klog.V(4).InfoS("reservation cache delete failed to parse, obj %T", obj) return } - - h.rrNominator.DeleteReservePod(framework.NewPodInfo(reservationutil.NewReservePod(r))) + podInfo, _ := 
framework.NewPodInfo(reservationutil.NewReservePod(r)) + h.rrNominator.DeleteReservePod(podInfo) // Here it is only marked that ReservationInfo is unavailable, // and the real deletion operation is executed in deleteReservationFromCache(pkg/scheduler/frameworkext/eventhandlers/reservation_handler.go). diff --git a/pkg/scheduler/plugins/reservation/eventhandler_test.go b/pkg/scheduler/plugins/reservation/eventhandler_test.go index 8f8d25732..5429c2ef9 100644 --- a/pkg/scheduler/plugins/reservation/eventhandler_test.go +++ b/pkg/scheduler/plugins/reservation/eventhandler_test.go @@ -102,7 +102,7 @@ func TestEventHandlerOnAdd(t *testing.T) { t.Run(tt.name, func(t *testing.T) { cache := newReservationCache(nil) eh := &reservationEventHandler{cache: cache} - eh.OnAdd(tt.reservation) + eh.OnAdd(tt.reservation, true) if tt.wantReservation == nil { rInfo := cache.getReservationInfoByUID(tt.reservation.UID) assert.Nil(t, rInfo) @@ -242,11 +242,13 @@ func TestEventHandlerDelete(t *testing.T) { } cache := newReservationCache(nil) eh := &reservationEventHandler{cache: cache, rrNominator: newNominator()} - eh.OnAdd(activeReservation) + eh.OnAdd(activeReservation, true) rInfo := cache.getReservationInfoByUID(activeReservation.UID) assert.NotNil(t, rInfo) - eh.rrNominator.AddNominatedReservePod(framework.NewPodInfo(reservation.NewReservePod(activeReservation)), "test-node") - assert.Equal(t, []*framework.PodInfo{framework.NewPodInfo(reservation.NewReservePod(activeReservation))}, eh.rrNominator.NominatedReservePodForNode("test-node")) + reservePodInfo, _ := framework.NewPodInfo(reservation.NewReservePod(activeReservation)) + eh.rrNominator.AddNominatedReservePod(reservePodInfo, "test-node") + reservePodInfo, _ = framework.NewPodInfo(reservation.NewReservePod(activeReservation)) + assert.Equal(t, []*framework.PodInfo{reservePodInfo}, eh.rrNominator.NominatedReservePodForNode("test-node")) eh.OnDelete(activeReservation) rInfo = cache.getReservationInfoByUID(activeReservation.UID) assert.NotNil(t, rInfo) diff --git a/pkg/scheduler/plugins/reservation/nominator.go b/pkg/scheduler/plugins/reservation/nominator.go index 6feb6289c..bb7fd0eab 100644 --- a/pkg/scheduler/plugins/reservation/nominator.go +++ b/pkg/scheduler/plugins/reservation/nominator.go @@ -200,11 +200,13 @@ func (pl *Plugin) RemoveNominatedReservations(pod *corev1.Pod) { } func (pl *Plugin) AddNominatedReservePod(pod *corev1.Pod, nodeName string) { - pl.nominator.AddNominatedReservePod(framework.NewPodInfo(pod), nodeName) + podInfo, _ := framework.NewPodInfo(pod) + pl.nominator.AddNominatedReservePod(podInfo, nodeName) } func (pl *Plugin) DeleteNominatedReservePod(pod *corev1.Pod) { - pl.nominator.DeleteReservePod(framework.NewPodInfo(pod)) + podInfo, _ := framework.NewPodInfo(pod) + pl.nominator.DeleteReservePod(podInfo) } func (pl *Plugin) GetNominatedReservation(pod *corev1.Pod, nodeName string) *frameworkext.ReservationInfo { diff --git a/pkg/scheduler/plugins/reservation/nominator_test.go b/pkg/scheduler/plugins/reservation/nominator_test.go index ecf3d1f6f..fdd92c0e1 100644 --- a/pkg/scheduler/plugins/reservation/nominator_test.go +++ b/pkg/scheduler/plugins/reservation/nominator_test.go @@ -287,7 +287,7 @@ func TestNominateReservation(t *testing.T) { assert.NoError(t, err) pl := plugin.(*Plugin) cycleState := framework.NewCycleState() - requests, _ := apiresource.PodRequestsAndLimits(tt.pod) + requests := apiresource.PodRequests(tt.pod, apiresource.PodResourcesOptions{}) state := &stateData{ nodeReservationStates: 
map[string]nodeReservationState{}, podRequests: requests, diff --git a/pkg/scheduler/plugins/reservation/plugin.go b/pkg/scheduler/plugins/reservation/plugin.go index 08187fa20..913956109 100644 --- a/pkg/scheduler/plugins/reservation/plugin.go +++ b/pkg/scheduler/plugins/reservation/plugin.go @@ -134,12 +134,12 @@ func (pl *Plugin) NewControllers() ([]frameworkext.Controller, error) { return []frameworkext.Controller{reservationController}, nil } -func (pl *Plugin) EventsToRegister() []framework.ClusterEvent { +func (pl *Plugin) EventsToRegister() []framework.ClusterEventWithHint { // To register a custom event, follow the naming convention at: // https://github.com/kubernetes/kubernetes/blob/e1ad9bee5bba8fbe85a6bf6201379ce8b1a611b1/pkg/scheduler/eventhandlers.go#L415-L422 gvk := fmt.Sprintf("reservations.%v.%v", schedulingv1alpha1.GroupVersion.Version, schedulingv1alpha1.GroupVersion.Group) - return []framework.ClusterEvent{ - {Resource: framework.GVK(gvk), ActionType: framework.Add | framework.Update | framework.Delete}, + return []framework.ClusterEventWithHint{ + {Event: framework.ClusterEvent{Resource: framework.GVK(gvk), ActionType: framework.Add | framework.Update | framework.Delete}}, } } @@ -238,7 +238,7 @@ func (pl *Plugin) PreFilter(ctx context.Context, cycleState *framework.CycleStat return nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonReservationAffinity) } preResult = &framework.PreFilterResult{ - NodeNames: sets.NewString(), + NodeNames: sets.Set[string]{}, } for nodeName := range state.nodeReservationStates { preResult.NodeNames.Insert(nodeName) @@ -252,7 +252,7 @@ func (pl *Plugin) PreFilterExtensions() framework.PreFilterExtensions { } func (pl *Plugin) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *corev1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status { - podRequests, _ := resourceapi.PodRequestsAndLimits(podInfoToAdd.Pod) + podRequests := resourceapi.PodRequests(podInfoToAdd.Pod, resourceapi.PodResourcesOptions{}) if quotav1.IsZero(podRequests) { return nil } @@ -280,7 +280,7 @@ func (pl *Plugin) AddPod(ctx context.Context, cycleState *framework.CycleState, } func (pl *Plugin) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *corev1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status { - podRequests, _ := resourceapi.PodRequestsAndLimits(podInfoToRemove.Pod) + podRequests := resourceapi.PodRequests(podInfoToRemove.Pod, resourceapi.PodResourcesOptions{}) if quotav1.IsZero(podRequests) { return nil } diff --git a/pkg/scheduler/plugins/reservation/plugin_test.go b/pkg/scheduler/plugins/reservation/plugin_test.go index 15911c90d..e2d75fd9a 100644 --- a/pkg/scheduler/plugins/reservation/plugin_test.go +++ b/pkg/scheduler/plugins/reservation/plugin_test.go @@ -49,7 +49,7 @@ import ( koordfake "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned/fake" koordinatorinformers "github.com/koordinator-sh/koordinator/pkg/client/informers/externalversions" "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config" - "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta2" + "github.com/koordinator-sh/koordinator/pkg/scheduler/apis/config/v1beta3" "github.com/koordinator-sh/koordinator/pkg/scheduler/frameworkext" reservationutil "github.com/koordinator-sh/koordinator/pkg/util/reservation" ) @@ -96,6 +96,14 @@ func (f *fakeSharedLister) NodeInfos() framework.NodeInfoLister { return f 
} +func (f *fakeSharedLister) StorageInfos() framework.StorageInfoLister { + return f +} + +func (f *fakeSharedLister) IsPVCUsedByPods(key string) bool { + return false +} + func (f *fakeSharedLister) List() ([]*framework.NodeInfo, error) { if f.listErr { return nil, fmt.Errorf("list error") @@ -122,10 +130,10 @@ type pluginTestSuit struct { } func newPluginTestSuitWith(t testing.TB, pods []*corev1.Pod, nodes []*corev1.Node) *pluginTestSuit { - var v1beta2args v1beta2.ReservationArgs - v1beta2.SetDefaults_ReservationArgs(&v1beta2args) + var v1beta3args v1beta3.ReservationArgs + v1beta3.SetDefaults_ReservationArgs(&v1beta3args) var reservationArgs config.ReservationArgs - err := v1beta2.Convert_v1beta2_ReservationArgs_To_config_ReservationArgs(&v1beta2args, &reservationArgs, nil) + err := v1beta3.Convert_v1beta3_ReservationArgs_To_config_ReservationArgs(&v1beta3args, &reservationArgs, nil) assert.NoError(t, err) koordClientSet := koordfake.NewSimpleClientset() @@ -150,6 +158,7 @@ func newPluginTestSuitWith(t testing.TB, pods []*corev1.Pod, nodes []*corev1.Nod eventRecorder := record.NewEventRecorderAdapter(fakeRecorder) fw, err := schedulertesting.NewFramework( + context.TODO(), registeredPlugins, "koord-scheduler", frameworkruntime.WithClientSet(cs), @@ -284,7 +293,7 @@ func TestPreFilter(t *testing.T) { }, wantStatus: nil, wantPreRes: &framework.PreFilterResult{ - NodeNames: sets.NewString("test-node-1"), + NodeNames: sets.New("test-node-1"), }, }, } @@ -562,7 +571,7 @@ func TestFilter(t *testing.T) { pl := p.(*Plugin) cycleState := framework.NewCycleState() if tt.stateData != nil { - tt.stateData.podRequests, _ = apiresource.PodRequestsAndLimits(tt.pod) + tt.stateData.podRequests = apiresource.PodRequests(tt.pod, apiresource.PodResourcesOptions{}) tt.stateData.podRequestsResources = framework.NewResource(tt.stateData.podRequests) cycleState.Write(stateKey, tt.stateData) } @@ -1441,7 +1450,7 @@ func TestPreFilterExtensionAddPod(t *testing.T) { pl.reservationCache.updateReservation(reservation) assert.NoError(t, pl.reservationCache.assumePod(reservation.UID, tt.pod)) } - podInfo := framework.NewPodInfo(tt.pod) + podInfo, _ := framework.NewPodInfo(tt.pod) nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(node) status := pl.PreFilterExtensions().AddPod(context.TODO(), cycleState, nil, podInfo, nodeInfo) @@ -1553,7 +1562,7 @@ func TestPreFilterExtensionRemovePod(t *testing.T) { pl.reservationCache.updateReservation(reservation) assert.NoError(t, pl.reservationCache.assumePod(reservation.UID, tt.pod)) } - podInfo := framework.NewPodInfo(tt.pod) + podInfo, _ := framework.NewPodInfo(tt.pod) nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(node) status := pl.PreFilterExtensions().RemovePod(context.TODO(), cycleState, nil, podInfo, nodeInfo) diff --git a/pkg/scheduler/plugins/reservation/pod_eventhandler.go b/pkg/scheduler/plugins/reservation/pod_eventhandler.go index f42846215..0d7497d6b 100644 --- a/pkg/scheduler/plugins/reservation/pod_eventhandler.go +++ b/pkg/scheduler/plugins/reservation/pod_eventhandler.go @@ -50,7 +50,7 @@ func assignedPod(pod *corev1.Pod) bool { return len(pod.Spec.NodeName) != 0 } -func (h *podEventHandler) OnAdd(obj interface{}) { +func (h *podEventHandler) OnAdd(obj interface{}, isInInitialList bool) { pod, _ := obj.(*corev1.Pod) if pod == nil { return @@ -96,7 +96,8 @@ func (h *podEventHandler) updatePod(oldPod, newPod *corev1.Pod) { } h.nominator.RemoveNominatedReservation(newPod) - h.nominator.DeleteReservePod(framework.NewPodInfo(newPod)) + podInfo, _ := 
framework.NewPodInfo(newPod) + h.nominator.DeleteReservePod(podInfo) var reservationUID types.UID if oldPod != nil { @@ -130,7 +131,8 @@ func (h *podEventHandler) updatePod(oldPod, newPod *corev1.Pod) { func (h *podEventHandler) deletePod(pod *corev1.Pod) { h.nominator.RemoveNominatedReservation(pod) - h.nominator.DeleteReservePod(framework.NewPodInfo(pod)) + podInfo, _ := framework.NewPodInfo(pod) + h.nominator.DeleteReservePod(podInfo) reservationAllocated, err := apiext.GetReservationAllocated(pod) if err == nil && reservationAllocated != nil && reservationAllocated.UID != "" { diff --git a/pkg/scheduler/plugins/reservation/pod_eventhandler_test.go b/pkg/scheduler/plugins/reservation/pod_eventhandler_test.go index b8cb9b01e..b01a3cd87 100644 --- a/pkg/scheduler/plugins/reservation/pod_eventhandler_test.go +++ b/pkg/scheduler/plugins/reservation/pod_eventhandler_test.go @@ -76,19 +76,16 @@ func TestPodEventHandler(t *testing.T) { }, } - handler.nominator.AddNominatedReservePod(framework.NewPodInfo(pod), "test-node-1") + podInfo, _ := framework.NewPodInfo(pod) + handler.nominator.AddNominatedReservePod(podInfo, "test-node-1") assert.Equal(t, "test-node-1", handler.nominator.nominatedReservePodToNode[pod.UID]) - assert.Equal(t, []*framework.PodInfo{ - framework.NewPodInfo(pod), - }, handler.nominator.nominatedReservePod["test-node-1"]) - handler.OnAdd(pod) + assert.Equal(t, []*framework.PodInfo{podInfo}, handler.nominator.nominatedReservePod["test-node-1"]) + handler.OnAdd(pod, true) rInfo := handler.cache.getReservationInfoByUID(reservationUID) assert.Empty(t, rInfo.AssignedPods) // pod not assigned, no need to delete reservation nominated node assert.Equal(t, "test-node-1", handler.nominator.nominatedReservePodToNode[pod.UID]) - assert.Equal(t, []*framework.PodInfo{ - framework.NewPodInfo(pod), - }, handler.nominator.nominatedReservePod["test-node-1"]) + assert.Equal(t, []*framework.PodInfo{podInfo}, handler.nominator.nominatedReservePod["test-node-1"]) newPod := pod.DeepCopy() apiext.SetReservationAllocated(newPod, reservation) @@ -97,9 +94,7 @@ func TestPodEventHandler(t *testing.T) { assert.Len(t, rInfo.AssignedPods, 0) // pod not assigned, no need to delete reservation nominated node assert.Equal(t, "test-node-1", handler.nominator.nominatedReservePodToNode[pod.UID]) - assert.Equal(t, []*framework.PodInfo{ - framework.NewPodInfo(pod), - }, handler.nominator.nominatedReservePod["test-node-1"]) + assert.Equal(t, []*framework.PodInfo{podInfo}, handler.nominator.nominatedReservePod["test-node-1"]) newPod.Spec.NodeName = reservation.Status.NodeName handler.OnUpdate(pod, newPod) @@ -119,38 +114,36 @@ func TestPodEventHandler(t *testing.T) { } assert.Equal(t, expectPodRequirement, rInfo.AssignedPods[pod.UID]) - handler.nominator.nominatedReservePod["test-node-1"] = []*framework.PodInfo{ - framework.NewPodInfo(&corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-1", - UID: "test-1", - }, - }), - } - handler.nominator.AddNominatedReservePod(framework.NewPodInfo(newPod), "test-node-1") + podInfo, _ = framework.NewPodInfo(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + UID: "test-1", + }, + }) + handler.nominator.nominatedReservePod["test-node-1"] = []*framework.PodInfo{podInfo} + podInfo, _ = framework.NewPodInfo(newPod) + handler.nominator.AddNominatedReservePod(podInfo, "test-node-1") assert.Equal(t, "test-node-1", handler.nominator.nominatedReservePodToNode[newPod.UID]) - assert.Equal(t, []*framework.PodInfo{ - framework.NewPodInfo(&corev1.Pod{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: "test-1", - UID: "test-1", - }, - }), - framework.NewPodInfo(newPod), - }, handler.nominator.nominatedReservePod["test-node-1"]) + podInfo1, _ := framework.NewPodInfo(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + UID: "test-1", + }, + }) + podInfo2, _ := framework.NewPodInfo(newPod) + assert.Equal(t, []*framework.PodInfo{podInfo1, podInfo2}, handler.nominator.nominatedReservePod["test-node-1"]) handler.OnDelete(newPod) rInfo = handler.cache.getReservationInfoByUID(reservationUID) assert.Empty(t, rInfo.AssignedPods) assert.Equal(t, "", handler.nominator.nominatedReservePodToNode[newPod.UID]) - assert.Equal(t, []*framework.PodInfo{ - framework.NewPodInfo(&corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-1", - UID: "test-1", - }, - }), - }, handler.nominator.nominatedReservePod["test-node-1"]) + podInfo, _ = framework.NewPodInfo(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + UID: "test-1", + }, + }) + assert.Equal(t, []*framework.PodInfo{podInfo}, handler.nominator.nominatedReservePod["test-node-1"]) } func TestPodEventHandlerWithOperatingPod(t *testing.T) { @@ -173,7 +166,7 @@ func TestPodEventHandlerWithOperatingPod(t *testing.T) { NodeName: "test-node-1", }, } - handler.OnAdd(operatingReservationPod) + handler.OnAdd(operatingReservationPod, true) pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -194,7 +187,7 @@ func TestPodEventHandlerWithOperatingPod(t *testing.T) { }, }, } - handler.OnAdd(pod) + handler.OnAdd(pod, true) rInfo := handler.cache.getReservationInfoByUID(operatingReservationPod.UID) assert.NotNil(t, rInfo) diff --git a/pkg/scheduler/plugins/reservation/scoring.go b/pkg/scheduler/plugins/reservation/scoring.go index 65c286225..00fc70d62 100644 --- a/pkg/scheduler/plugins/reservation/scoring.go +++ b/pkg/scheduler/plugins/reservation/scoring.go @@ -76,7 +76,7 @@ func (pl *Plugin) PreScore(ctx context.Context, cycleState *framework.CycleState index := atomic.AddInt32(&nominatedNodeIndex, 1) nominatedReservations[index-1] = nominatedReservationInfo } - }) + }, "ReservationPreScore") if err := errCh.ReceiveError(); err != nil { return framework.AsStatus(err) } @@ -182,7 +182,7 @@ func findMostPreferredReservationByOrder(rOnNode []*frameworkext.ReservationInfo func scoreReservation(pod *corev1.Pod, rInfo *frameworkext.ReservationInfo, allocated corev1.ResourceList) int64 { // TODO(joseph): we should support zero-request pods - requested, _ := resourceapi.PodRequestsAndLimits(pod) + requested := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) requested = quotav1.Add(requested, allocated) resources := quotav1.RemoveZeros(rInfo.Allocatable) diff --git a/pkg/scheduler/plugins/reservation/scoring_test.go b/pkg/scheduler/plugins/reservation/scoring_test.go index 1c046a021..b432fb479 100644 --- a/pkg/scheduler/plugins/reservation/scoring_test.go +++ b/pkg/scheduler/plugins/reservation/scoring_test.go @@ -221,7 +221,7 @@ func TestScore(t *testing.T) { state := &stateData{ nodeReservationStates: map[string]nodeReservationState{}, } - state.podRequests, _ = apiresource.PodRequestsAndLimits(tt.pod) + state.podRequests = apiresource.PodRequests(tt.pod, apiresource.PodResourcesOptions{}) state.podRequestsResources = framework.NewResource(state.podRequests) for _, reservation := range tt.reservations { rInfo := frameworkext.NewReservationInfo(reservation) @@ -323,7 +323,7 @@ func TestScoreWithOrder(t *testing.T) { state := &stateData{ nodeReservationStates: map[string]nodeReservationState{}, } - 
state.podRequests, _ = apiresource.PodRequestsAndLimits(normalPod) + state.podRequests = apiresource.PodRequests(normalPod, apiresource.PodResourcesOptions{}) state.podRequestsResources = framework.NewResource(state.podRequests) // add three Reservations to three node @@ -694,7 +694,7 @@ func TestPreScoreWithNominateReservation(t *testing.T) { state := &stateData{ nodeReservationStates: map[string]nodeReservationState{}, } - state.podRequests, _ = apiresource.PodRequestsAndLimits(tt.pod) + state.podRequests = apiresource.PodRequests(tt.pod, apiresource.PodResourcesOptions{}) state.podRequestsResources = framework.NewResource(state.podRequests) for _, reservation := range tt.reservations { rInfo := frameworkext.NewReservationInfo(reservation) diff --git a/pkg/scheduler/plugins/reservation/transformer.go b/pkg/scheduler/plugins/reservation/transformer.go index 8abc415f0..7592ca88b 100644 --- a/pkg/scheduler/plugins/reservation/transformer.go +++ b/pkg/scheduler/plugins/reservation/transformer.go @@ -48,6 +48,7 @@ func (pl *Plugin) BeforePreFilter(ctx context.Context, cycleState *framework.Cyc } func (pl *Plugin) prepareMatchReservationState(ctx context.Context, cycleState *framework.CycleState, pod *corev1.Pod) (*stateData, bool, *framework.Status) { + logger := klog.FromContext(ctx) reservationAffinity, err := reservationutil.GetRequiredReservationAffinity(pod) if err != nil { klog.ErrorS(err, "Failed to parse reservation affinity", "pod", klog.KObj(pod)) @@ -136,7 +137,7 @@ func (pl *Plugin) prepareMatchReservationState(ctx context.Context, cycleState * return } - if err := extender.Scheduler().GetCache().InvalidNodeInfo(node.Name); err != nil { + if err := extender.Scheduler().GetCache().InvalidNodeInfo(logger, node.Name); err != nil { klog.ErrorS(err, "Failed to InvalidNodeInfo", "pod", klog.KObj(pod), "node", node.Name) errCh.SendErrorWithCancel(err, cancel) return @@ -189,7 +190,7 @@ func (pl *Plugin) prepareMatchReservationState(ctx context.Context, cycleState * allPluginToRestoreState[index-1] = pluginToRestoreState } } - pl.handle.Parallelizer().Until(parallelCtx, len(allNodes), processNode) + pl.handle.Parallelizer().Until(parallelCtx, len(allNodes), processNode, "transformNodesWithReservation") err = errCh.ReceiveError() if err != nil { klog.ErrorS(err, "Failed to find matched or unmatched reservations", "pod", klog.KObj(pod)) @@ -199,7 +200,7 @@ func (pl *Plugin) prepareMatchReservationState(ctx context.Context, cycleState * allNodeReservationStates = allNodeReservationStates[:stateIndex] allPluginToRestoreState = allPluginToRestoreState[:stateIndex] - podRequests, _ := resourceapi.PodRequestsAndLimits(pod) + podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) podRequestResources := framework.NewResource(podRequests) state := &stateData{ hasAffinity: reservationAffinity != nil, @@ -438,7 +439,7 @@ func (pl *Plugin) BeforeFilter(ctx context.Context, cycleState *framework.CycleS for _, rInfo := range nominatedReservationInfos { if schedulingcorev1.PodPriority(rInfo.Pod) >= schedulingcorev1.PodPriority(pod) && rInfo.Pod.UID != pod.UID { - pInfo := framework.NewPodInfo(rInfo.Pod) + pInfo, _ := framework.NewPodInfo(rInfo.Pod) nodeInfoOut.AddPodInfo(pInfo) status := pl.handle.RunPreFilterExtensionAddPod(ctx, cycleState, pod, pInfo, nodeInfoOut) if !status.IsSuccess() { diff --git a/pkg/scheduler/plugins/reservation/transformer_benchmark_test.go b/pkg/scheduler/plugins/reservation/transformer_benchmark_test.go index 6d07e65db..ce6306279 100644 --- 
a/pkg/scheduler/plugins/reservation/transformer_benchmark_test.go +++ b/pkg/scheduler/plugins/reservation/transformer_benchmark_test.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/utils/pointer" @@ -105,7 +106,7 @@ func BenchmarkBeforePrefilterWithMatchedPod(b *testing.B) { reservePod := reservationutil.NewReservePod(reservation) reservePods[string(reservePod.UID)] = reservePod nodeInfo.AddPod(reservePod) - assert.NoError(b, pl.handle.Scheduler().GetCache().AddPod(reservePod)) + assert.NoError(b, pl.handle.Scheduler().GetCache().AddPod(klog.Background(), reservePod)) } pod := &corev1.Pod{ @@ -248,8 +249,8 @@ func BenchmarkBeforePrefilterWithUnmatchedPod(b *testing.B) { reservePod := reservationutil.NewReservePod(reservation) nodeInfo.AddPod(reservePod) nodeInfo.AddPod(assignedPod) - assert.NoError(b, pl.handle.Scheduler().GetCache().AddPod(reservePod)) - assert.NoError(b, pl.handle.Scheduler().GetCache().AddPod(assignedPod)) + assert.NoError(b, pl.handle.Scheduler().GetCache().AddPod(klog.Background(), reservePod)) + assert.NoError(b, pl.handle.Scheduler().GetCache().AddPod(klog.Background(), assignedPod)) } pod := &corev1.Pod{ diff --git a/pkg/slo-controller/config/configmap_event_handler.go b/pkg/slo-controller/config/configmap_event_handler.go index ca1ae9ea5..652dabafd 100644 --- a/pkg/slo-controller/config/configmap_event_handler.go +++ b/pkg/slo-controller/config/configmap_event_handler.go @@ -17,6 +17,7 @@ limitations under the License. package config import ( + "context" "reflect" corev1 "k8s.io/api/core/v1" @@ -34,7 +35,7 @@ type EnqueueRequestForConfigMap struct { SyncCacheIfChanged func(configMap *corev1.ConfigMap) bool } -func (p *EnqueueRequestForConfigMap) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (p *EnqueueRequestForConfigMap) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { configMap, ok := evt.Object.(*corev1.ConfigMap) if !ok { return @@ -49,13 +50,13 @@ func (p *EnqueueRequestForConfigMap) Create(evt event.CreateEvent, q workqueue.R p.EnqueueRequest(&q) } -func (p *EnqueueRequestForConfigMap) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (p *EnqueueRequestForConfigMap) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { } -func (p *EnqueueRequestForConfigMap) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (p *EnqueueRequestForConfigMap) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { } -func (p *EnqueueRequestForConfigMap) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (p *EnqueueRequestForConfigMap) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { newConfigMap := evt.ObjectNew.(*corev1.ConfigMap) oldConfigMap := evt.ObjectOld.(*corev1.ConfigMap) if reflect.DeepEqual(newConfigMap.Data, oldConfigMap.Data) { diff --git a/pkg/slo-controller/config/configmap_event_handler_test.go b/pkg/slo-controller/config/configmap_event_handler_test.go index f91a51097..7fa2905c6 100644 --- a/pkg/slo-controller/config/configmap_event_handler_test.go +++ b/pkg/slo-controller/config/configmap_event_handler_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package config import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -138,7 +139,7 @@ func Test_common_Create(t *testing.T) { p.SyncCacheIfChanged = tt.args.cacheChangedReturn } q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) - p.Create(tt.args.evt, q) + p.Create(context.TODO(), tt.args.evt, q) assert.Equal(t, q.Len(), len(tt.want.objs), "Create() test fail, len expect equal!") @@ -299,7 +300,7 @@ func Test_common_Update(t *testing.T) { p.SyncCacheIfChanged = tt.args.cacheChangedReturn } q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) - p.Update(tt.args.evt, q) + p.Update(context.TODO(), tt.args.evt, q) assert.Equal(t, q.Len(), len(tt.want.objs), "update() test fail, len expect equal!") diff --git a/pkg/slo-controller/nodemetric/node_event_handler.go b/pkg/slo-controller/nodemetric/node_event_handler.go index 976dbbfce..1f2e8e375 100644 --- a/pkg/slo-controller/nodemetric/node_event_handler.go +++ b/pkg/slo-controller/nodemetric/node_event_handler.go @@ -17,6 +17,7 @@ limitations under the License. package nodemetric import ( + "context" "reflect" corev1 "k8s.io/api/core/v1" @@ -34,7 +35,7 @@ type EnqueueRequestForNode struct { client.Client } -func (n *EnqueueRequestForNode) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { +func (n *EnqueueRequestForNode) Create(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { if node, ok := e.Object.(*corev1.Node); !ok { return } else { @@ -46,7 +47,7 @@ func (n *EnqueueRequestForNode) Create(e event.CreateEvent, q workqueue.RateLimi } } -func (n *EnqueueRequestForNode) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (n *EnqueueRequestForNode) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { newNode, oldNode := e.ObjectNew.(*corev1.Node), e.ObjectOld.(*corev1.Node) // TODO, only use for noderesource if !isNodeAllocatableUpdated(newNode, oldNode) { @@ -59,7 +60,7 @@ func (n *EnqueueRequestForNode) Update(e event.UpdateEvent, q workqueue.RateLimi }) } -func (n *EnqueueRequestForNode) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (n *EnqueueRequestForNode) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { if node, ok := e.Object.(*corev1.Node); !ok { return } else { @@ -71,7 +72,8 @@ func (n *EnqueueRequestForNode) Delete(e event.DeleteEvent, q workqueue.RateLimi } } -func (n *EnqueueRequestForNode) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) {} +func (n *EnqueueRequestForNode) Generic(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { +} // isNodeAllocatableUpdated returns whether the new node's allocatable is different from the old one's func isNodeAllocatableUpdated(newNode *corev1.Node, oldNode *corev1.Node) bool { diff --git a/pkg/slo-controller/nodemetric/node_event_handler_test.go b/pkg/slo-controller/nodemetric/node_event_handler_test.go index f64b7332a..992d1dc71 100644 --- a/pkg/slo-controller/nodemetric/node_event_handler_test.go +++ b/pkg/slo-controller/nodemetric/node_event_handler_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package nodemetric import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -53,7 +54,7 @@ func Test_EnqueueRequestForNode(t *testing.T) { { name: "create node event", fn: func(handler *EnqueueRequestForNode, q workqueue.RateLimitingInterface) { - handler.Create(event.CreateEvent{ + handler.Create(context.TODO(), event.CreateEvent{ Object: &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -67,7 +68,7 @@ func Test_EnqueueRequestForNode(t *testing.T) { { name: "create event not node", fn: func(handler *EnqueueRequestForNode, q workqueue.RateLimitingInterface) { - handler.Create(event.CreateEvent{ + handler.Create(context.TODO(), event.CreateEvent{ Object: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", @@ -80,7 +81,7 @@ func Test_EnqueueRequestForNode(t *testing.T) { { name: "delete node event", fn: func(handler *EnqueueRequestForNode, q workqueue.RateLimitingInterface) { - handler.Delete(event.DeleteEvent{ + handler.Delete(context.TODO(), event.DeleteEvent{ Object: &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -94,7 +95,7 @@ func Test_EnqueueRequestForNode(t *testing.T) { { name: "delete event not node", fn: func(handler *EnqueueRequestForNode, q workqueue.RateLimitingInterface) { - handler.Delete(event.DeleteEvent{ + handler.Delete(context.TODO(), event.DeleteEvent{ Object: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", @@ -107,7 +108,7 @@ func Test_EnqueueRequestForNode(t *testing.T) { { name: "update node event", fn: func(handler *EnqueueRequestForNode, q workqueue.RateLimitingInterface) { - handler.Update(event.UpdateEvent{ + handler.Update(context.TODO(), event.UpdateEvent{ ObjectOld: &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -131,7 +132,7 @@ func Test_EnqueueRequestForNode(t *testing.T) { { name: "update node event ignore", fn: func(handler *EnqueueRequestForNode, q workqueue.RateLimitingInterface) { - handler.Update(event.UpdateEvent{ + handler.Update(context.TODO(), event.UpdateEvent{ ObjectOld: &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -149,7 +150,7 @@ func Test_EnqueueRequestForNode(t *testing.T) { { name: "generic node event ignore", fn: func(handler *EnqueueRequestForNode, q workqueue.RateLimitingInterface) { - handler.Generic(event.GenericEvent{}, q) + handler.Generic(context.TODO(), event.GenericEvent{}, q) }, hasEvent: false, }, diff --git a/pkg/slo-controller/nodemetric/nodemetric_controller.go b/pkg/slo-controller/nodemetric/nodemetric_controller.go index 75c841a11..64f58b617 100644 --- a/pkg/slo-controller/nodemetric/nodemetric_controller.go +++ b/pkg/slo-controller/nodemetric/nodemetric_controller.go @@ -31,7 +31,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" slov1alpha1 "github.com/koordinator-sh/koordinator/apis/slo/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/slo-controller/config" @@ -194,8 +193,8 @@ func (r *NodeMetricReconciler) SetupWithManager(mgr ctrl.Manager) error { r.cfgCache = handler return ctrl.NewControllerManagedBy(mgr). For(&slov1alpha1.NodeMetric{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). - Watches(&source.Kind{Type: &corev1.Node{}}, &EnqueueRequestForNode{}). - Watches(&source.Kind{Type: &corev1.ConfigMap{}}, handler). + Watches(&corev1.Node{}, &EnqueueRequestForNode{}). + Watches(&corev1.ConfigMap{}, handler). Named(Name). 
Complete(r) } diff --git a/pkg/slo-controller/nodemetric/nodemetric_controller_test.go b/pkg/slo-controller/nodemetric/nodemetric_controller_test.go index b7b2d2312..8cbebf072 100644 --- a/pkg/slo-controller/nodemetric/nodemetric_controller_test.go +++ b/pkg/slo-controller/nodemetric/nodemetric_controller_test.go @@ -535,7 +535,7 @@ func Test_CreateNodeMetricAndUpdateUnmarshalError(t *testing.T) { err = reconciler.Update(context.TODO(), invalidConfigMap) assert.NoError(t, err) queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) - handler.Update(event.UpdateEvent{ObjectOld: configMap, ObjectNew: invalidConfigMap}, queue) + handler.Update(context.TODO(), event.UpdateEvent{ObjectOld: configMap, ObjectNew: invalidConfigMap}, queue) _, err = reconciler.Reconcile(ctx, nodeReq) assert.NoError(t, err) @@ -646,5 +646,5 @@ func prepareConfig(t *testing.T, reconciler *NodeMetricReconciler, handler *conf assert.NoError(t, err, "failed to create config") queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) - handler.Create(event.CreateEvent{Object: configMap}, queue) + handler.Create(context.TODO(), event.CreateEvent{Object: configMap}, queue) } diff --git a/pkg/slo-controller/noderesource/nodemetric_event_handler.go b/pkg/slo-controller/noderesource/nodemetric_event_handler.go index fa56178b2..665f80900 100644 --- a/pkg/slo-controller/noderesource/nodemetric_event_handler.go +++ b/pkg/slo-controller/noderesource/nodemetric_event_handler.go @@ -17,6 +17,7 @@ limitations under the License. package noderesource import ( + "context" "fmt" "reflect" @@ -40,10 +41,10 @@ type EnqueueRequestForNodeMetric struct { syncContext *framework.SyncContext } -func (n *EnqueueRequestForNodeMetric) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { +func (n *EnqueueRequestForNodeMetric) Create(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { } -func (n *EnqueueRequestForNodeMetric) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (n *EnqueueRequestForNodeMetric) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { newNodeMetric := e.ObjectNew.(*slov1alpha1.NodeMetric) oldNodeMetric := e.ObjectOld.(*slov1alpha1.NodeMetric) if reflect.DeepEqual(oldNodeMetric.Status, newNodeMetric.Status) { @@ -56,7 +57,7 @@ func (n *EnqueueRequestForNodeMetric) Update(e event.UpdateEvent, q workqueue.Ra }) } -func (n *EnqueueRequestForNodeMetric) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (n *EnqueueRequestForNodeMetric) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { nodeMetric, ok := e.Object.(*slov1alpha1.NodeMetric) if !ok { return @@ -71,7 +72,7 @@ func (n *EnqueueRequestForNodeMetric) Delete(e event.DeleteEvent, q workqueue.Ra }) } -func (n *EnqueueRequestForNodeMetric) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { +func (n *EnqueueRequestForNodeMetric) Generic(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { } func (n *EnqueueRequestForNodeMetric) cleanSyncContext(nodeMetric *slov1alpha1.NodeMetric) error { diff --git a/pkg/slo-controller/noderesource/nodemetric_event_handler_test.go b/pkg/slo-controller/noderesource/nodemetric_event_handler_test.go index 0d734c31a..069579ea0 100644 --- a/pkg/slo-controller/noderesource/nodemetric_event_handler_test.go +++ b/pkg/slo-controller/noderesource/nodemetric_event_handler_test.go @@ -17,6 +17,7 @@ limitations under the 
License. package noderesource import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -46,14 +47,14 @@ func Test_EnqueueRequestForNodeMetricMetric(t *testing.T) { { name: "create nodemetric event", fn: func(handler *EnqueueRequestForNodeMetric, q workqueue.RateLimitingInterface) { - handler.Create(event.CreateEvent{}, q) + handler.Create(context.TODO(), event.CreateEvent{}, q) }, hasEvent: false, }, { name: "delete nodemetric event", fn: func(handler *EnqueueRequestForNodeMetric, q workqueue.RateLimitingInterface) { - handler.Delete(event.DeleteEvent{ + handler.Delete(context.TODO(), event.DeleteEvent{ Object: &slov1alpha1.NodeMetric{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -67,7 +68,7 @@ func Test_EnqueueRequestForNodeMetricMetric(t *testing.T) { { name: "delete event not nodemetric", fn: func(handler *EnqueueRequestForNodeMetric, q workqueue.RateLimitingInterface) { - handler.Delete(event.DeleteEvent{ + handler.Delete(context.TODO(), event.DeleteEvent{ Object: &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -80,14 +81,14 @@ func Test_EnqueueRequestForNodeMetricMetric(t *testing.T) { { name: "generic event ignore", fn: func(handler *EnqueueRequestForNodeMetric, q workqueue.RateLimitingInterface) { - handler.Generic(event.GenericEvent{}, q) + handler.Generic(context.TODO(), event.GenericEvent{}, q) }, hasEvent: false, }, { name: "update nodemetric event", fn: func(handler *EnqueueRequestForNodeMetric, q workqueue.RateLimitingInterface) { - handler.Update(event.UpdateEvent{ + handler.Update(context.TODO(), event.UpdateEvent{ ObjectOld: &slov1alpha1.NodeMetric{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -109,7 +110,7 @@ func Test_EnqueueRequestForNodeMetricMetric(t *testing.T) { { name: "update nodemetric event ignore", fn: func(handler *EnqueueRequestForNodeMetric, q workqueue.RateLimitingInterface) { - handler.Update(event.UpdateEvent{ + handler.Update(context.TODO(), event.UpdateEvent{ ObjectOld: &slov1alpha1.NodeMetric{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", diff --git a/pkg/slo-controller/noderesource/noderesource_controller.go b/pkg/slo-controller/noderesource/noderesource_controller.go index b0d9c002d..b33048b3f 100644 --- a/pkg/slo-controller/noderesource/noderesource_controller.go +++ b/pkg/slo-controller/noderesource/noderesource_controller.go @@ -27,12 +27,11 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/utils/clock" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/source" slov1alpha1 "github.com/koordinator-sh/koordinator/apis/slo/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/slo-controller/config" @@ -185,8 +184,8 @@ func (r *NodeResourceReconciler) SetupWithManager(mgr ctrl.Manager) error { builder := ctrl.NewControllerManagedBy(mgr). Named(Name). // avoid conflict with others reconciling `Node` For(&corev1.Node{}). - Watches(&source.Kind{Type: &slov1alpha1.NodeMetric{}}, &EnqueueRequestForNodeMetric{syncContext: r.NodeSyncContext}). - Watches(&source.Kind{Type: &corev1.ConfigMap{}}, cfgHandler) + Watches(&slov1alpha1.NodeMetric{}, &EnqueueRequestForNodeMetric{syncContext: r.NodeSyncContext}). 
+ Watches(&corev1.ConfigMap{}, cfgHandler) // setup plugins // allow plugins to mutate controller via the builder diff --git a/pkg/slo-controller/noderesource/noderesource_controller_test.go b/pkg/slo-controller/noderesource/noderesource_controller_test.go index d6ab3b45c..943e8d32c 100644 --- a/pkg/slo-controller/noderesource/noderesource_controller_test.go +++ b/pkg/slo-controller/noderesource/noderesource_controller_test.go @@ -26,12 +26,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" + "k8s.io/utils/clock" "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/koordinator-sh/koordinator/apis/configuration" @@ -39,6 +40,7 @@ import ( schedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/scheduling/v1alpha1" slov1alpha1 "github.com/koordinator-sh/koordinator/apis/slo/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/slo-controller/noderesource/framework" + "github.com/koordinator-sh/koordinator/pkg/util/testutil" ) func Test_NodeResourceController_ConfigNotAvailable(t *testing.T) { @@ -92,7 +94,11 @@ func Test_NodeResourceController_NodeMetricNotExist(t *testing.T) { slov1alpha1.AddToScheme(scheme) schedulingv1alpha1.AddToScheme(scheme) - client := fake.NewClientBuilder().WithScheme(scheme).Build() + client := fake.NewClientBuilder().WithScheme(scheme). + WithIndex(&corev1.Pod{}, "spec.nodeName", func(obj client.Object) []string { + return []string{obj.(*corev1.Pod).Spec.NodeName} + }). + Build() r := &NodeResourceReconciler{ Client: client, cfgCache: &FakeCfgCache{ @@ -124,7 +130,8 @@ func Test_NodeResourceController_NodeMetricNotExist(t *testing.T) { key := types.NamespacedName{Name: nodeName} nodeReq := ctrl.Request{NamespacedName: key} - opt := framework.NewOption().WithClient(client).WithScheme(scheme).WithControllerBuilder(&builder.Builder{}) + fakeBuilder := builder.ControllerManagedBy(&testutil.FakeManager{}) + opt := framework.NewOption().WithClient(client).WithScheme(scheme).WithControllerBuilder(fakeBuilder) framework.RunSetupExtenders(opt) result, err := r.Reconcile(ctx, nodeReq) @@ -137,7 +144,11 @@ func Test_NodeResourceController_ColocationEnabled(t *testing.T) { _ = clientgoscheme.AddToScheme(scheme) _ = slov1alpha1.AddToScheme(scheme) _ = schedulingv1alpha1.AddToScheme(scheme) - client := fake.NewClientBuilder().WithScheme(scheme).Build() + client := fake.NewClientBuilder().WithScheme(scheme). + WithIndex(&corev1.Pod{}, "spec.nodeName", func(obj client.Object) []string { + return []string{obj.(*corev1.Pod).Spec.NodeName} + }). + Build() r := &NodeResourceReconciler{ Client: client, cfgCache: &FakeCfgCache{ diff --git a/pkg/slo-controller/noderesource/plugins/batchresource/noderesourcetopology_event_handler.go b/pkg/slo-controller/noderesource/plugins/batchresource/noderesourcetopology_event_handler.go index 667a4e08e..4ac95e5ed 100644 --- a/pkg/slo-controller/noderesource/plugins/batchresource/noderesourcetopology_event_handler.go +++ b/pkg/slo-controller/noderesource/plugins/batchresource/noderesourcetopology_event_handler.go @@ -17,6 +17,7 @@ limitations under the License. 
package batchresource import ( + "context" "fmt" topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1" @@ -41,7 +42,7 @@ type NRTHandler struct { syncContext *framework.SyncContext } -func (h *NRTHandler) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (h *NRTHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { nrt, ok := evt.Object.(*topologyv1alpha1.NodeResourceTopology) if !ok { return @@ -58,7 +59,7 @@ func (h *NRTHandler) Create(evt event.CreateEvent, q workqueue.RateLimitingInter }) } -func (h *NRTHandler) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (h *NRTHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { nrtOld, okOld := evt.ObjectOld.(*topologyv1alpha1.NodeResourceTopology) nrtNew, okNew := evt.ObjectNew.(*topologyv1alpha1.NodeResourceTopology) if !okOld || !okNew { @@ -80,7 +81,7 @@ func (h *NRTHandler) Update(evt event.UpdateEvent, q workqueue.RateLimitingInter }) } -func (h *NRTHandler) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (h *NRTHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { nrt, ok := evt.Object.(*topologyv1alpha1.NodeResourceTopology) if !ok { return @@ -91,7 +92,7 @@ func (h *NRTHandler) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInter } } -func (h *NRTHandler) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (h *NRTHandler) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { } func isNRTResourcesCreated(nrt *topologyv1alpha1.NodeResourceTopology) bool { diff --git a/pkg/slo-controller/noderesource/plugins/batchresource/plugin.go b/pkg/slo-controller/noderesource/plugins/batchresource/plugin.go index 9f68b86e0..5ff83b217 100644 --- a/pkg/slo-controller/noderesource/plugins/batchresource/plugin.go +++ b/pkg/slo-controller/noderesource/plugins/batchresource/plugin.go @@ -32,7 +32,6 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/clock" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/koordinator-sh/koordinator/apis/configuration" "github.com/koordinator-sh/koordinator/apis/extension" @@ -79,7 +78,7 @@ func (p *Plugin) Setup(opt *framework.Option) error { } nrtHandler = &NRTHandler{syncContext: nrtSyncContext} - opt.Builder = opt.Builder.Watches(&source.Kind{Type: &topologyv1alpha1.NodeResourceTopology{}}, nrtHandler) + opt.Builder = opt.Builder.Watches(&topologyv1alpha1.NodeResourceTopology{}, nrtHandler) return nil } diff --git a/pkg/slo-controller/noderesource/plugins/batchresource/plugin_test.go b/pkg/slo-controller/noderesource/plugins/batchresource/plugin_test.go index 6746b6826..40f12ba2c 100644 --- a/pkg/slo-controller/noderesource/plugins/batchresource/plugin_test.go +++ b/pkg/slo-controller/noderesource/plugins/batchresource/plugin_test.go @@ -29,9 +29,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" + fakeclock "k8s.io/utils/clock/testing" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/builder" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -42,6 +42,7 @@ import ( slov1alpha1 "github.com/koordinator-sh/koordinator/apis/slo/v1alpha1" 
"github.com/koordinator-sh/koordinator/pkg/slo-controller/noderesource/framework" "github.com/koordinator-sh/koordinator/pkg/util" + "github.com/koordinator-sh/koordinator/pkg/util/testutil" ) func makeResourceList(cpu, memory string) corev1.ResourceList { @@ -211,7 +212,7 @@ func TestPlugin(t *testing.T) { testOpt := &framework.Option{ Scheme: testScheme, Client: fake.NewClientBuilder().WithScheme(testScheme).Build(), - Builder: &builder.Builder{}, + Builder: builder.ControllerManagedBy(&testutil.FakeManager{}), Recorder: &record.FakeRecorder{}, } err = p.Setup(testOpt) @@ -875,7 +876,7 @@ func TestPreUpdate(t *testing.T) { testOpt := &framework.Option{ Scheme: testScheme, Client: fake.NewClientBuilder().WithScheme(testScheme).Build(), - Builder: &builder.Builder{}, + Builder: builder.ControllerManagedBy(&testutil.FakeManager{}), Recorder: &record.FakeRecorder{}, } if tt.fields.client != nil { @@ -1394,7 +1395,7 @@ func TestPrepare(t *testing.T) { testOpt := &framework.Option{ Scheme: testScheme, Client: fake.NewClientBuilder().WithScheme(testScheme).Build(), - Builder: &builder.Builder{}, + Builder: builder.ControllerManagedBy(&testutil.FakeManager{}), Recorder: &record.FakeRecorder{}, } if tt.fields.client != nil { @@ -1618,7 +1619,7 @@ func TestPrepareWithThirdParty(t *testing.T) { testOpt := &framework.Option{ Scheme: testScheme, Client: fake.NewClientBuilder().WithScheme(testScheme).Build(), - Builder: &builder.Builder{}, + Builder: builder.ControllerManagedBy(&testutil.FakeManager{}), Recorder: &record.FakeRecorder{}, } if tt.fields.client != nil { @@ -4033,7 +4034,7 @@ func TestPluginCalculate(t *testing.T) { testOpt := &framework.Option{ Scheme: testScheme, Client: fake.NewClientBuilder().WithScheme(testScheme).Build(), - Builder: &builder.Builder{}, + Builder: builder.ControllerManagedBy(&testutil.FakeManager{}), Recorder: &record.FakeRecorder{}, } if tt.fields.client != nil { @@ -4060,7 +4061,7 @@ func TestPluginCalculate(t *testing.T) { func TestPlugin_isDegradeNeeded(t *testing.T) { const degradeTimeoutMinutes = 10 type fields struct { - Clock clock.Clock + Clock *fakeclock.FakeClock } type args struct { strategy *configuration.ColocationStrategy @@ -4076,7 +4077,7 @@ func TestPlugin_isDegradeNeeded(t *testing.T) { { name: "empty NodeMetric should degrade", fields: fields{ - Clock: clock.RealClock{}, + Clock: &fakeclock.FakeClock{}, }, args: args{ nodeMetric: nil, @@ -4086,7 +4087,7 @@ func TestPlugin_isDegradeNeeded(t *testing.T) { { name: "empty NodeMetric status should degrade", fields: fields{ - Clock: clock.RealClock{}, + Clock: &fakeclock.FakeClock{}, }, args: args{ nodeMetric: &slov1alpha1.NodeMetric{}, @@ -4096,7 +4097,7 @@ func TestPlugin_isDegradeNeeded(t *testing.T) { { name: "outdated NodeMetric status should degrade", fields: fields{ - Clock: clock.RealClock{}, + Clock: nil, }, args: args{ strategy: &configuration.ColocationStrategy{ @@ -4131,7 +4132,7 @@ func TestPlugin_isDegradeNeeded(t *testing.T) { { name: "outdated NodeMetric status should degrade 1", fields: fields{ - Clock: clock.NewFakeClock(time.Now().Add(time.Minute * (degradeTimeoutMinutes + 1))), + Clock: fakeclock.NewFakeClock(time.Now().Add(time.Minute * (degradeTimeoutMinutes + 1))), }, args: args{ strategy: &configuration.ColocationStrategy{ @@ -4166,11 +4167,13 @@ func TestPlugin_isDegradeNeeded(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - oldClock := Clock - Clock = tt.fields.Clock - defer func() { - Clock = oldClock - }() + if tt.fields.Clock != nil { + 
oldClock := Clock + Clock = tt.fields.Clock + defer func() { + Clock = oldClock + }() + } p := &Plugin{} assert.Equal(t, tt.want, p.isDegradeNeeded(tt.args.strategy, tt.args.nodeMetric, tt.args.node)) diff --git a/pkg/slo-controller/noderesource/plugins/cpunormalization/noderesourcetopology_event_handler.go b/pkg/slo-controller/noderesource/plugins/cpunormalization/noderesourcetopology_event_handler.go index 12b0e8119..31172f63e 100644 --- a/pkg/slo-controller/noderesource/plugins/cpunormalization/noderesourcetopology_event_handler.go +++ b/pkg/slo-controller/noderesource/plugins/cpunormalization/noderesourcetopology_event_handler.go @@ -17,6 +17,8 @@ limitations under the License. package cpunormalization import ( + "context" + topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" @@ -32,7 +34,7 @@ var _ handler.EventHandler = &nrtHandler{} type nrtHandler struct{} -func (h *nrtHandler) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (h *nrtHandler) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { nrt, ok := evt.Object.(*topologyv1alpha1.NodeResourceTopology) if !ok { return @@ -49,7 +51,7 @@ func (h *nrtHandler) Create(evt event.CreateEvent, q workqueue.RateLimitingInter }) } -func (h *nrtHandler) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (h *nrtHandler) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { nrtOld, okOld := evt.ObjectOld.(*topologyv1alpha1.NodeResourceTopology) nrtNew, okNew := evt.ObjectNew.(*topologyv1alpha1.NodeResourceTopology) if !okOld || !okNew { @@ -71,10 +73,10 @@ func (h *nrtHandler) Update(evt event.UpdateEvent, q workqueue.RateLimitingInter }) } -func (h *nrtHandler) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (h *nrtHandler) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { } -func (h *nrtHandler) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (h *nrtHandler) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { } func isNRTCPUBasicInfoCreated(nrt *topologyv1alpha1.NodeResourceTopology) bool { diff --git a/pkg/slo-controller/noderesource/plugins/cpunormalization/noderesourcetopology_event_handler_test.go b/pkg/slo-controller/noderesource/plugins/cpunormalization/noderesourcetopology_event_handler_test.go index bc473397b..c096b6522 100644 --- a/pkg/slo-controller/noderesource/plugins/cpunormalization/noderesourcetopology_event_handler_test.go +++ b/pkg/slo-controller/noderesource/plugins/cpunormalization/noderesourcetopology_event_handler_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package cpunormalization import ( + "context" "testing" topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1" @@ -197,7 +198,7 @@ func Test_EnqueueRequestForNodeResourceTopology(t *testing.T) { { name: "create NRT event", fn: func(handler *nrtHandler, q workqueue.RateLimitingInterface) { - handler.Create(event.CreateEvent{ + handler.Create(context.TODO(), event.CreateEvent{ Object: &topologyv1alpha1.NodeResourceTopology{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -215,7 +216,7 @@ func Test_EnqueueRequestForNodeResourceTopology(t *testing.T) { { name: "create event not NRT", fn: func(handler *nrtHandler, q workqueue.RateLimitingInterface) { - handler.Create(event.CreateEvent{ + handler.Create(context.TODO(), event.CreateEvent{ Object: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", @@ -228,7 +229,7 @@ func Test_EnqueueRequestForNodeResourceTopology(t *testing.T) { { name: "delete NRT event", fn: func(handler *nrtHandler, q workqueue.RateLimitingInterface) { - handler.Delete(event.DeleteEvent{ + handler.Delete(context.TODO(), event.DeleteEvent{ Object: &topologyv1alpha1.NodeResourceTopology{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -246,7 +247,7 @@ func Test_EnqueueRequestForNodeResourceTopology(t *testing.T) { { name: "delete event not NRT", fn: func(handler *nrtHandler, q workqueue.RateLimitingInterface) { - handler.Delete(event.DeleteEvent{ + handler.Delete(context.TODO(), event.DeleteEvent{ Object: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", @@ -259,7 +260,7 @@ func Test_EnqueueRequestForNodeResourceTopology(t *testing.T) { { name: "update NRT event", fn: func(handler *nrtHandler, q workqueue.RateLimitingInterface) { - handler.Update(event.UpdateEvent{ + handler.Update(context.TODO(), event.UpdateEvent{ ObjectOld: &topologyv1alpha1.NodeResourceTopology{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -286,7 +287,7 @@ func Test_EnqueueRequestForNodeResourceTopology(t *testing.T) { { name: "update node event ignore", fn: func(handler *nrtHandler, q workqueue.RateLimitingInterface) { - handler.Update(event.UpdateEvent{ + handler.Update(context.TODO(), event.UpdateEvent{ ObjectOld: &topologyv1alpha1.NodeResourceTopology{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -304,7 +305,7 @@ func Test_EnqueueRequestForNodeResourceTopology(t *testing.T) { { name: "generic node event ignore", fn: func(handler *nrtHandler, q workqueue.RateLimitingInterface) { - handler.Generic(event.GenericEvent{}, q) + handler.Generic(context.TODO(), event.GenericEvent{}, q) }, hasEvent: false, }, diff --git a/pkg/slo-controller/noderesource/plugins/cpunormalization/plugin.go b/pkg/slo-controller/noderesource/plugins/cpunormalization/plugin.go index 87e13d0db..5df5ae0fe 100644 --- a/pkg/slo-controller/noderesource/plugins/cpunormalization/plugin.go +++ b/pkg/slo-controller/noderesource/plugins/cpunormalization/plugin.go @@ -27,7 +27,6 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/klog/v2" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/koordinator-sh/koordinator/apis/configuration" "github.com/koordinator-sh/koordinator/apis/extension" @@ -67,10 +66,10 @@ func (p *Plugin) Setup(opt *framework.Option) error { if err := topologyv1alpha1.AddToScheme(clientgoscheme.Scheme); err != nil { return fmt.Errorf("failed to add client go scheme for NodeResourceTopology, err: %w", err) } - opt.Builder = opt.Builder.Watches(&source.Kind{Type: 
&topologyv1alpha1.NodeResourceTopology{}}, &nrtHandler{}) + opt.Builder = opt.Builder.Watches(&topologyv1alpha1.NodeResourceTopology{}, &nrtHandler{}) cfgHandler = newConfigHandler(opt.Client, DefaultCPUNormalizationCfg(), opt.Recorder) - opt.Builder = opt.Builder.Watches(&source.Kind{Type: &corev1.ConfigMap{}}, cfgHandler) + opt.Builder = opt.Builder.Watches(&corev1.ConfigMap{}, cfgHandler) return nil } diff --git a/pkg/slo-controller/noderesource/plugins/cpunormalization/plugin_test.go b/pkg/slo-controller/noderesource/plugins/cpunormalization/plugin_test.go index 08f4ca64f..b9986e115 100644 --- a/pkg/slo-controller/noderesource/plugins/cpunormalization/plugin_test.go +++ b/pkg/slo-controller/noderesource/plugins/cpunormalization/plugin_test.go @@ -34,6 +34,7 @@ import ( "github.com/koordinator-sh/koordinator/apis/extension" "github.com/koordinator-sh/koordinator/pkg/client/clientset/versioned/scheme" "github.com/koordinator-sh/koordinator/pkg/slo-controller/noderesource/framework" + "github.com/koordinator-sh/koordinator/pkg/util/testutil" ) func TestPlugin(t *testing.T) { @@ -45,7 +46,7 @@ func TestPlugin(t *testing.T) { testOpt := &framework.Option{ Scheme: testScheme, Client: fake.NewClientBuilder().WithScheme(testScheme).Build(), - Builder: &builder.Builder{}, + Builder: builder.ControllerManagedBy(&testutil.FakeManager{}), Recorder: &record.FakeRecorder{}, } err := p.Setup(testOpt) diff --git a/pkg/slo-controller/noderesource/plugins/gpudeviceresource/device_event_handler.go b/pkg/slo-controller/noderesource/plugins/gpudeviceresource/device_event_handler.go index e48075185..9d494a713 100644 --- a/pkg/slo-controller/noderesource/plugins/gpudeviceresource/device_event_handler.go +++ b/pkg/slo-controller/noderesource/plugins/gpudeviceresource/device_event_handler.go @@ -17,6 +17,7 @@ limitations under the License. 
package gpudeviceresource import ( + "context" "reflect" "k8s.io/apimachinery/pkg/types" @@ -32,7 +33,7 @@ var _ handler.EventHandler = &DeviceHandler{} type DeviceHandler struct{} -func (d *DeviceHandler) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { +func (d *DeviceHandler) Create(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { device := e.Object.(*schedulingv1alpha1.Device) q.Add(reconcile.Request{ NamespacedName: types.NamespacedName{ @@ -41,7 +42,7 @@ func (d *DeviceHandler) Create(e event.CreateEvent, q workqueue.RateLimitingInte }) } -func (d *DeviceHandler) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (d *DeviceHandler) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { newDevice := e.ObjectNew.(*schedulingv1alpha1.Device) oldDevice := e.ObjectOld.(*schedulingv1alpha1.Device) if reflect.DeepEqual(newDevice.Spec, oldDevice.Spec) { @@ -54,7 +55,7 @@ func (d *DeviceHandler) Update(e event.UpdateEvent, q workqueue.RateLimitingInte }) } -func (d *DeviceHandler) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (d *DeviceHandler) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { device, ok := e.Object.(*schedulingv1alpha1.Device) if !ok { return @@ -66,5 +67,5 @@ func (d *DeviceHandler) Delete(e event.DeleteEvent, q workqueue.RateLimitingInte }) } -func (d *DeviceHandler) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { +func (d *DeviceHandler) Generic(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { } diff --git a/pkg/slo-controller/noderesource/plugins/gpudeviceresource/device_event_handler_test.go b/pkg/slo-controller/noderesource/plugins/gpudeviceresource/device_event_handler_test.go index d7e344871..7aa77d72a 100644 --- a/pkg/slo-controller/noderesource/plugins/gpudeviceresource/device_event_handler_test.go +++ b/pkg/slo-controller/noderesource/plugins/gpudeviceresource/device_event_handler_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package gpudeviceresource import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -40,7 +41,7 @@ func Test_EnqueueRequestForNodeMetricMetric(t *testing.T) { { name: "create device event", fn: func(handler handler.EventHandler, q workqueue.RateLimitingInterface) { - handler.Create(event.CreateEvent{ + handler.Create(context.TODO(), event.CreateEvent{ Object: &schedulingv1alpha1.Device{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -54,7 +55,7 @@ func Test_EnqueueRequestForNodeMetricMetric(t *testing.T) { { name: "delete device event", fn: func(handler handler.EventHandler, q workqueue.RateLimitingInterface) { - handler.Delete(event.DeleteEvent{ + handler.Delete(context.TODO(), event.DeleteEvent{ Object: &schedulingv1alpha1.Device{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -68,7 +69,7 @@ func Test_EnqueueRequestForNodeMetricMetric(t *testing.T) { { name: "delete event not device", fn: func(handler handler.EventHandler, q workqueue.RateLimitingInterface) { - handler.Delete(event.DeleteEvent{ + handler.Delete(context.TODO(), event.DeleteEvent{ Object: &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -81,14 +82,14 @@ func Test_EnqueueRequestForNodeMetricMetric(t *testing.T) { { name: "generic event ignore", fn: func(handler handler.EventHandler, q workqueue.RateLimitingInterface) { - handler.Generic(event.GenericEvent{}, q) + handler.Generic(context.TODO(), event.GenericEvent{}, q) }, hasEvent: false, }, { name: "update device event", fn: func(handler handler.EventHandler, q workqueue.RateLimitingInterface) { - handler.Update(event.UpdateEvent{ + handler.Update(context.TODO(), event.UpdateEvent{ ObjectOld: &schedulingv1alpha1.Device{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", @@ -120,7 +121,7 @@ func Test_EnqueueRequestForNodeMetricMetric(t *testing.T) { { name: "update device event ignore", fn: func(handler handler.EventHandler, q workqueue.RateLimitingInterface) { - handler.Update(event.UpdateEvent{ + handler.Update(context.TODO(), event.UpdateEvent{ ObjectOld: &schedulingv1alpha1.Device{ ObjectMeta: metav1.ObjectMeta{ Name: "node1", diff --git a/pkg/slo-controller/noderesource/plugins/gpudeviceresource/plugin.go b/pkg/slo-controller/noderesource/plugins/gpudeviceresource/plugin.go index cdd15288d..116e977ab 100644 --- a/pkg/slo-controller/noderesource/plugins/gpudeviceresource/plugin.go +++ b/pkg/slo-controller/noderesource/plugins/gpudeviceresource/plugin.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/source" "github.com/koordinator-sh/koordinator/apis/configuration" "github.com/koordinator-sh/koordinator/apis/extension" @@ -66,7 +65,7 @@ func (p *Plugin) Setup(opt *framework.Option) error { client = opt.Client // schedulingv1alpha1.AddToScheme(opt.Scheme) - opt.Builder = opt.Builder.Watches(&source.Kind{Type: &schedulingv1alpha1.Device{}}, &DeviceHandler{}) + opt.Builder = opt.Builder.Watches(&schedulingv1alpha1.Device{}, &DeviceHandler{}) return nil } diff --git a/pkg/slo-controller/noderesource/plugins/gpudeviceresource/plugin_test.go b/pkg/slo-controller/noderesource/plugins/gpudeviceresource/plugin_test.go index a7a02fdd5..e966ea1fc 100644 --- a/pkg/slo-controller/noderesource/plugins/gpudeviceresource/plugin_test.go +++ b/pkg/slo-controller/noderesource/plugins/gpudeviceresource/plugin_test.go @@ -36,6 +36,7 @@ import ( "github.com/koordinator-sh/koordinator/apis/extension" schedulingv1alpha1 
"github.com/koordinator-sh/koordinator/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/slo-controller/noderesource/framework" + "github.com/koordinator-sh/koordinator/pkg/util/testutil" ) func TestPlugin(t *testing.T) { @@ -47,7 +48,7 @@ func TestPlugin(t *testing.T) { testOpt := &framework.Option{ Scheme: testScheme, Client: fake.NewClientBuilder().WithScheme(testScheme).Build(), - Builder: &builder.Builder{}, + Builder: builder.ControllerManagedBy(&testutil.FakeManager{}), } err := p.Setup(testOpt) assert.NoError(t, err) diff --git a/pkg/slo-controller/noderesource/plugins/midresource/plugin_test.go b/pkg/slo-controller/noderesource/plugins/midresource/plugin_test.go index e929dbdb7..12942501e 100644 --- a/pkg/slo-controller/noderesource/plugins/midresource/plugin_test.go +++ b/pkg/slo-controller/noderesource/plugins/midresource/plugin_test.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/clock" + clock "k8s.io/utils/clock/testing" "k8s.io/utils/pointer" "github.com/koordinator-sh/koordinator/apis/configuration" @@ -529,7 +529,7 @@ func TestPluginCalculate(t *testing.T) { func TestPlugin_isDegradeNeeded(t *testing.T) { const degradeTimeoutMinutes = 10 type fields struct { - Clock clock.Clock + Clock *clock.FakeClock } type args struct { strategy *configuration.ColocationStrategy diff --git a/pkg/slo-controller/noderesource/resource_calculator_test.go b/pkg/slo-controller/noderesource/resource_calculator_test.go index 2fffb9a51..b32f4b5da 100644 --- a/pkg/slo-controller/noderesource/resource_calculator_test.go +++ b/pkg/slo-controller/noderesource/resource_calculator_test.go @@ -28,8 +28,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/clock" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -42,6 +42,7 @@ import ( "github.com/koordinator-sh/koordinator/pkg/slo-controller/noderesource/framework" "github.com/koordinator-sh/koordinator/pkg/slo-controller/noderesource/plugins/batchresource" "github.com/koordinator-sh/koordinator/pkg/slo-controller/noderesource/plugins/midresource" + "github.com/koordinator-sh/koordinator/pkg/util/testutil" ) func init() { @@ -1047,7 +1048,8 @@ func Test_calculateNodeResource(t *testing.T) { slov1alpha1.AddToScheme(scheme) schedulingv1alpha1.AddToScheme(scheme) client := fake.NewClientBuilder().WithScheme(scheme).Build() - opt := framework.NewOption().WithClient(client).WithScheme(scheme).WithControllerBuilder(&builder.Builder{}) + fakeBuilder := builder.ControllerManagedBy(&testutil.FakeManager{}) + opt := framework.NewOption().WithClient(client).WithScheme(scheme).WithControllerBuilder(fakeBuilder) framework.RunSetupExtenders(opt) for _, tt := range tests { @@ -1210,6 +1212,8 @@ func Test_isColocationCfgDisabled(t *testing.T) { } func Test_updateNodeResource(t *testing.T) { + scheme := runtime.NewScheme() + _ = clientgoscheme.AddToScheme(scheme) enabledCfg := &configuration.ColocationCfg{ ColocationStrategy: configuration.ColocationStrategy{ Enable: pointer.Bool(true), @@ -1701,18 +1705,18 @@ func Test_updateNodeResource(t *testing.T) { { name: "update node meta", fields: fields{ - Client: fake.NewClientBuilder().WithRuntimeObjects(&corev1.Node{ + Client: 
fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(&corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "test-node0", }, Status: corev1.NodeStatus{ Allocatable: corev1.ResourceList{ - extension.BatchCPU: resource.MustParse("20"), - extension.BatchMemory: resource.MustParse("40G"), + corev1.ResourceCPU: resource.MustParse("20"), + corev1.ResourceMemory: resource.MustParse("40G"), }, Capacity: corev1.ResourceList{ - extension.BatchCPU: resource.MustParse("20"), - extension.BatchMemory: resource.MustParse("40G"), + corev1.ResourceCPU: resource.MustParse("20"), + corev1.ResourceMemory: resource.MustParse("40G"), }, }, }).Build(), diff --git a/pkg/slo-controller/nodeslo/nodeslo_controller.go b/pkg/slo-controller/nodeslo/nodeslo_controller.go index 8b1067097..3658587f2 100644 --- a/pkg/slo-controller/nodeslo/nodeslo_controller.go +++ b/pkg/slo-controller/nodeslo/nodeslo_controller.go @@ -31,7 +31,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" slov1alpha1 "github.com/koordinator-sh/koordinator/apis/slo/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/slo-controller/metrics" @@ -235,10 +234,10 @@ func (r *NodeSLOReconciler) SetupWithManager(mgr ctrl.Manager) error { r.sloCfgCache = configMapCacheHandler return ctrl.NewControllerManagedBy(mgr). For(&slov1alpha1.NodeSLO{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). - Watches(&source.Kind{Type: &corev1.Node{}}, &nodemetric.EnqueueRequestForNode{ + Watches(&corev1.Node{}, &nodemetric.EnqueueRequestForNode{ Client: r.Client, }). - Watches(&source.Kind{Type: &corev1.ConfigMap{}}, configMapCacheHandler). + Watches(&corev1.ConfigMap{}, configMapCacheHandler). Named(Name). Complete(r) } diff --git a/pkg/util/client/delegating_client.go b/pkg/util/client/delegating_client.go index 3e3392c78..63e36bc22 100644 --- a/pkg/util/client/delegating_client.go +++ b/pkg/util/client/delegating_client.go @@ -39,15 +39,17 @@ func init() { flag.BoolVar(&disableNoDeepCopy, "disable-no-deepcopy", false, "If you are going to disable NoDeepCopy List in some controllers and webhooks.") } +var _ client.NewClientFunc = NewClient + // NewClient creates the default caching client with disable deepcopy list from cache. 
-func NewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { +func NewClient(config *rest.Config, options client.Options) (client.Client, error) { c, err := client.New(config, options) if err != nil { return nil, err } uncachedGVKs := map[schema.GroupVersionKind]struct{}{} - for _, obj := range uncachedObjects { + for _, obj := range options.Cache.DisableFor { gvk, err := apiutil.GVKForObject(obj, c.Scheme()) if err != nil { return nil, err @@ -55,18 +57,22 @@ func NewClient(cache cache.Cache, config *rest.Config, options client.Options, u uncachedGVKs[gvk] = struct{}{} } + mgrCache := options.Cache.Reader.(cache.Cache) + return &delegatingClient{ scheme: c.Scheme(), mapper: c.RESTMapper(), Reader: &delegatingReader{ - CacheReader: cache, + CacheReader: options.Cache.Reader, ClientReader: c, - noDeepCopyLister: &noDeepCopyLister{cache: cache, scheme: c.Scheme()}, + noDeepCopyLister: &noDeepCopyLister{cache: mgrCache, scheme: c.Scheme()}, scheme: c.Scheme(), uncachedGVKs: uncachedGVKs, }, - Writer: c, - StatusClient: c, + Writer: c, + StatusClient: c, + SubResourceClientConstructor: c, + originClient: c, }, nil } @@ -74,9 +80,11 @@ type delegatingClient struct { client.Reader client.Writer client.StatusClient + client.SubResourceClientConstructor - scheme *runtime.Scheme - mapper meta.RESTMapper + originClient client.Client + scheme *runtime.Scheme + mapper meta.RESTMapper } // Scheme returns the scheme this client is using. @@ -89,6 +97,18 @@ func (d *delegatingClient) RESTMapper() meta.RESTMapper { return d.mapper } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (d *delegatingClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return d.originClient.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the object is namespaced. +func (d *delegatingClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return d.originClient.IsObjectNamespaced(obj) +} + +var _ client.Reader = &delegatingReader{} + // delegatingReader forms a Reader that will cause Get and List requests for // unstructured types to use the ClientReader while requests for any other type // of object with use the CacheReader. This avoids accidentally caching the @@ -127,13 +147,13 @@ func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { } // Get retrieves an obj for a given object key from the Kubernetes Cluster. -func (d *delegatingReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { +func (d *delegatingReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object, option ...client.GetOption) error { if isUncached, err := d.shouldBypassCache(obj); err != nil { return err } else if isUncached { - return d.ClientReader.Get(ctx, key, obj) + return d.ClientReader.Get(ctx, key, obj, option...) } - return d.CacheReader.Get(ctx, key, obj) + return d.CacheReader.Get(ctx, key, obj, option...) } // List retrieves list of objects for a given namespace and list options. 
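For context on the change above: NewClient now matches controller-runtime's client.NewClientFunc, so the formerly explicit uncachedObjects argument moves into client.Options.Cache.DisableFor and the cache reader is taken from options.Cache.Reader, which the manager populates before calling the constructor. A minimal sketch of how such a constructor is typically wired into a manager on controller-runtime >= v0.15 follows; the utilclient alias, the newManager helper, and the ConfigMap entry are illustrative assumptions, not taken from this patch.

package example

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	// illustrative alias for the delegating client package shown above
	utilclient "github.com/koordinator-sh/koordinator/pkg/util/client"
)

// newManager sketches how a client.NewClientFunc is plugged into a manager.
// The manager fills options.Cache.Reader with its own cache before invoking
// NewClient, so callers only declare which objects should bypass the cache.
func newManager() (ctrl.Manager, error) {
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		NewClient: utilclient.NewClient,
		Client: client.Options{
			Cache: &client.CacheOptions{
				// illustrative: read ConfigMaps straight from the API server
				DisableFor: []client.Object{&corev1.ConfigMap{}},
			},
		},
	})
}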
diff --git a/pkg/util/fieldindex/register.go b/pkg/util/fieldindex/register.go index c29964210..6b17d42b8 100644 --- a/pkg/util/fieldindex/register.go +++ b/pkg/util/fieldindex/register.go @@ -24,9 +24,9 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - apiv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" + apiv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) var registerOnce sync.Once diff --git a/pkg/util/reservation/reservation.go b/pkg/util/reservation/reservation.go index 096481ff9..cd31c3c83 100644 --- a/pkg/util/reservation/reservation.go +++ b/pkg/util/reservation/reservation.go @@ -118,7 +118,7 @@ func NewReservePod(r *schedulingv1alpha1.Reservation) *corev1.Pod { reservePod.Status.Phase = corev1.PodFailed } if IsReservationAvailable(r) { - podRequests, _ := resource.PodRequestsAndLimits(reservePod) + podRequests := resource.PodRequests(reservePod, resource.PodResourcesOptions{}) if !quotav1.Equals(podRequests, r.Status.Allocatable) { // // PodRequests is different from r.Status.Allocatable, @@ -133,7 +133,7 @@ func NewReservePod(r *schedulingv1alpha1.Reservation) *corev1.Pod { func UpdateReservePodWithAllocatable(reservePod *corev1.Pod, podRequests, allocatable corev1.ResourceList) { if podRequests == nil { - podRequests, _ = resource.PodRequestsAndLimits(reservePod) + podRequests = resource.PodRequests(reservePod, resource.PodResourcesOptions{}) } else { podRequests = podRequests.DeepCopy() } @@ -349,9 +349,9 @@ func ReservationRequests(r *schedulingv1alpha1.Reservation) corev1.ResourceList return r.Status.Allocatable.DeepCopy() } if r.Spec.Template != nil { - requests, _ := resource.PodRequestsAndLimits(&corev1.Pod{ + requests := resource.PodRequests(&corev1.Pod{ Spec: r.Spec.Template.Spec, - }) + }, resource.PodResourcesOptions{}) return requests } return nil diff --git a/pkg/util/reservation/reservation_to_pod_eventhandler.go b/pkg/util/reservation/reservation_to_pod_eventhandler.go index fa2f21a07..184b2f62c 100644 --- a/pkg/util/reservation/reservation_to_pod_eventhandler.go +++ b/pkg/util/reservation/reservation_to_pod_eventhandler.go @@ -58,13 +58,13 @@ func NewReservationToPodEventHandler(handler cache.ResourceEventHandler, filters } } -func (r ReservationToPodEventHandler) OnAdd(obj interface{}) { +func (r ReservationToPodEventHandler) OnAdd(obj interface{}, isInInitialList bool) { reservation, ok := obj.(*schedulingv1alpha1.Reservation) if !ok { return } pod := NewReservePod(reservation) - r.handler.OnAdd(pod) + r.handler.OnAdd(pod, isInInitialList) } // OnUpdate calls UpdateFunc if it's not nil. 
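The OnAdd changes above track the client-go v0.27 cache.ResourceEventHandler interface, whose OnAdd method gained an isInInitialList parameter. A minimal sketch of a handler conforming to the new method set; the podLogger type is hypothetical and only illustrates the signatures.

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

// podLogger is a hypothetical handler showing the updated method set.
type podLogger struct{}

var _ cache.ResourceEventHandler = podLogger{}

// OnAdd now reports whether the object came from the informer's initial list.
func (podLogger) OnAdd(obj interface{}, isInInitialList bool) {
	if pod, ok := obj.(*corev1.Pod); ok {
		klog.InfoS("pod added", "pod", klog.KObj(pod), "fromInitialList", isInInitialList)
	}
}

func (podLogger) OnUpdate(oldObj, newObj interface{}) {}

func (podLogger) OnDelete(obj interface{}) {}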
diff --git a/pkg/util/reservation/reservation_to_pod_eventhandler_test.go b/pkg/util/reservation/reservation_to_pod_eventhandler_test.go index 2aad1d50f..c72f9edda 100644 --- a/pkg/util/reservation/reservation_to_pod_eventhandler_test.go +++ b/pkg/util/reservation/reservation_to_pod_eventhandler_test.go @@ -150,7 +150,7 @@ type fakePodHandler struct { t *testing.T } -func (f *fakePodHandler) OnAdd(obj interface{}) { +func (f *fakePodHandler) OnAdd(obj interface{}, isInInitialList bool) { _, ok := obj.(*corev1.Pod) if !ok { f.t.Errorf("OnAdd got object %T, but not a pod", obj) @@ -216,7 +216,7 @@ func TestReservationToPodEventHandler(t *testing.T) { }, ) - h.OnAdd(testReservation) + h.OnAdd(testReservation, false) h.OnUpdate(testReservation, testReservation) @@ -230,7 +230,7 @@ func TestReservationToPodEventHandler(t *testing.T) { }, ) - h.OnAdd(testReservation) + h.OnAdd(testReservation, false) h.OnUpdate(testReservation, testReservation) h.OnDelete(testReservation) }) diff --git a/pkg/util/testutil/manager.go b/pkg/util/testutil/manager.go new file mode 100644 index 000000000..b33c1f8ee --- /dev/null +++ b/pkg/util/testutil/manager.go @@ -0,0 +1,31 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/informertest" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +type FakeManager struct { + manager.Manager +} + +func (mgr *FakeManager) GetCache() cache.Cache { + return &informertest.FakeInformers{} +} diff --git a/pkg/util/transformer/elastic_quota_transformer.go b/pkg/util/transformer/elastic_quota_transformer.go index 3339eb0c0..a6f8bb982 100644 --- a/pkg/util/transformer/elastic_quota_transformer.go +++ b/pkg/util/transformer/elastic_quota_transformer.go @@ -19,10 +19,10 @@ package transformer import ( "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions" apiext "github.com/koordinator-sh/koordinator/apis/extension" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions" ) func SetupElasticQuotaTransformers(factory externalversions.SharedInformerFactory) { diff --git a/pkg/util/transformer/elastic_quota_transformer_test.go b/pkg/util/transformer/elastic_quota_transformer_test.go index 0a071fc02..64052e7bb 100644 --- a/pkg/util/transformer/elastic_quota_transformer_test.go +++ b/pkg/util/transformer/elastic_quota_transformer_test.go @@ -22,9 +22,9 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" apiext "github.com/koordinator-sh/koordinator/apis/extension" + 
"github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) func TestTransformElasticQuota(t *testing.T) { diff --git a/pkg/webhook/add_configmap.go b/pkg/webhook/add_configmap.go index 192ad55d1..3f9020ccc 100644 --- a/pkg/webhook/add_configmap.go +++ b/pkg/webhook/add_configmap.go @@ -23,8 +23,7 @@ import ( ) func init() { - - addHandlersWithGate(validating.HandlerMap, func() (enabled bool) { + addHandlersWithGate(validating.HandlerBuilderMap, func() (enabled bool) { return utilfeature.DefaultFeatureGate.Enabled(features.ConfigMapValidatingWebhook) }) } diff --git a/pkg/webhook/add_node.go b/pkg/webhook/add_node.go index f995cdf76..eff3df261 100644 --- a/pkg/webhook/add_node.go +++ b/pkg/webhook/add_node.go @@ -25,11 +25,11 @@ import ( func init() { - addHandlersWithGate(validating.HandlerMap, func() (enabled bool) { + addHandlersWithGate(validating.HandlerBuilderMap, func() (enabled bool) { return utilfeature.DefaultFeatureGate.Enabled(features.NodeValidatingWebhook) }) - addHandlersWithGate(mutating.HandlerMap, func() (enabled bool) { + addHandlersWithGate(mutating.HandlerBuilderMap, func() (enabled bool) { return utilfeature.DefaultFeatureGate.Enabled(features.NodeMutatingWebhook) }) } diff --git a/pkg/webhook/add_pod.go b/pkg/webhook/add_pod.go index 35b29e114..51c8e345e 100644 --- a/pkg/webhook/add_pod.go +++ b/pkg/webhook/add_pod.go @@ -24,11 +24,11 @@ import ( ) func init() { - addHandlersWithGate(mutating.HandlerMap, func() (enabled bool) { + addHandlersWithGate(mutating.HandlerBuilderMap, func() (enabled bool) { return utilfeature.DefaultFeatureGate.Enabled(features.PodMutatingWebhook) }) - addHandlersWithGate(validating.HandlerMap, func() (enabled bool) { + addHandlersWithGate(validating.HandlerBuilderMap, func() (enabled bool) { return utilfeature.DefaultFeatureGate.Enabled(features.PodValidatingWebhook) }) } diff --git a/pkg/webhook/add_quota.go b/pkg/webhook/add_quota.go index f646c6d54..8713f543e 100644 --- a/pkg/webhook/add_quota.go +++ b/pkg/webhook/add_quota.go @@ -24,11 +24,11 @@ import ( ) func init() { - addHandlersWithGate(mutating.HandlerMap, func() (enabled bool) { + addHandlersWithGate(mutating.HandlerBuilderMap, func() (enabled bool) { return utilfeature.DefaultFeatureGate.Enabled(features.ElasticQuotaMutatingWebhook) }) - addHandlersWithGate(validating.HandlerMap, func() (enabled bool) { + addHandlersWithGate(validating.HandlerBuilderMap, func() (enabled bool) { return utilfeature.DefaultFeatureGate.Enabled(features.ElasticQuotaValidatingWebhook) }) diff --git a/pkg/webhook/api_debug_server.go b/pkg/webhook/api_debug_server.go index c36e29bec..26ed253d7 100644 --- a/pkg/webhook/api_debug_server.go +++ b/pkg/webhook/api_debug_server.go @@ -29,9 +29,9 @@ func RegisterDebugAPIProvider(name string, provider http.Handler) { debugAPIProviderMap[name] = provider } -func InstallDebugAPIHandler(server *webhook.Server) { +func InstallDebugAPIHandler(server webhook.Server) { for name, provider := range debugAPIProviderMap { server.Register(name, provider) - klog.Infof("Success register debug api handler, name:%v, tcpAddr:%s:%d", name, server.Host, server.Port) + klog.Infof("Success register debug api handler, name:%v", name) } } diff --git a/pkg/webhook/cm/validating/validating_handler.go b/pkg/webhook/cm/validating/validating_handler.go index d46c63193..19266e565 100644 --- a/pkg/webhook/cm/validating/validating_handler.go +++ b/pkg/webhook/cm/validating/validating_handler.go @@ -26,7 +26,6 @@ import ( 
"k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/koordinator-sh/koordinator/pkg/util" @@ -43,8 +42,11 @@ type ConfigMapValidatingHandler struct { Decoder *admission.Decoder } -func NewConfigMapValidatingHandler() *ConfigMapValidatingHandler { - handler := &ConfigMapValidatingHandler{} +func NewConfigMapValidatingHandler(c client.Client, d *admission.Decoder) *ConfigMapValidatingHandler { + handler := &ConfigMapValidatingHandler{ + Client: c, + Decoder: d, + } return handler } @@ -110,7 +112,7 @@ func (h *ConfigMapValidatingHandler) getPlugins() []plugins.ConfigMapPlugin { return []plugins.ConfigMapPlugin{sloconfig.NewPlugin(h.Decoder, h.Client)} } -var _ inject.Client = &ConfigMapValidatingHandler{} +// var _ inject.Client = &ConfigMapValidatingHandler{} // InjectClient injects the client into the ValidatingHandler func (h *ConfigMapValidatingHandler) InjectClient(c client.Client) error { @@ -118,7 +120,7 @@ func (h *ConfigMapValidatingHandler) InjectClient(c client.Client) error { return nil } -var _ admission.DecoderInjector = &ConfigMapValidatingHandler{} +// var _ admission.DecoderInjector = &ConfigMapValidatingHandler{} // InjectDecoder injects the decoder into the ValidatingHandler func (h *ConfigMapValidatingHandler) InjectDecoder(d *admission.Decoder) error { diff --git a/pkg/webhook/cm/validating/validating_handler_test.go b/pkg/webhook/cm/validating/validating_handler_test.go index 97f52293f..73161f2bc 100644 --- a/pkg/webhook/cm/validating/validating_handler_test.go +++ b/pkg/webhook/cm/validating/validating_handler_test.go @@ -32,10 +32,8 @@ import ( func makeTestHandler() *ConfigMapValidatingHandler { client := fake.NewClientBuilder().Build() sche := client.Scheme() - decoder, _ := admission.NewDecoder(sche) - handler := NewConfigMapValidatingHandler() - handler.InjectClient(client) - handler.InjectDecoder(decoder) + decoder := admission.NewDecoder(sche) + handler := NewConfigMapValidatingHandler(client, decoder) return handler } diff --git a/pkg/webhook/cm/validating/webhooks.go b/pkg/webhook/cm/validating/webhooks.go index 43417a726..e19199a82 100644 --- a/pkg/webhook/cm/validating/webhooks.go +++ b/pkg/webhook/cm/validating/webhooks.go @@ -17,14 +17,33 @@ limitations under the License. 
package validating import ( + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/koordinator-sh/koordinator/pkg/webhook/util/framework" ) // +kubebuilder:webhook:path=/validate-configmap,mutating=false,failurePolicy=fail,sideEffects=None,groups="",resources=configmaps,verbs=create;update;delete,versions=v1,name=vconfigmap.koordinator.sh,admissionReviewVersions={v1beta1} var ( - // HandlerMap contains admission webhook handlers - HandlerMap = map[string]admission.Handler{ - "validate-configmap": NewConfigMapValidatingHandler(), + // HandlerBuilderMap contains admission webhook handlers builder + HandlerBuilderMap = map[string]framework.HandlerBuilder{ + "validate-configmap": &cmBuilder{}, } ) + +var _ framework.HandlerBuilder = &cmBuilder{} + +type cmBuilder struct { + mgr manager.Manager +} + +func (b *cmBuilder) WithControllerManager(mgr ctrl.Manager) framework.HandlerBuilder { + b.mgr = mgr + return b +} + +func (b *cmBuilder) Build() admission.Handler { + return NewConfigMapValidatingHandler(b.mgr.GetClient(), admission.NewDecoder(b.mgr.GetScheme())) +} diff --git a/pkg/webhook/elasticquota/mutating/mutating_handle_test.go b/pkg/webhook/elasticquota/mutating/mutating_handle_test.go index 34b117255..0bea52f7e 100644 --- a/pkg/webhook/elasticquota/mutating/mutating_handle_test.go +++ b/pkg/webhook/elasticquota/mutating/mutating_handle_test.go @@ -27,14 +27,15 @@ import ( "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) func makeTestHandler(t *testing.T) *ElasticQuotaMutatingHandler { client := fake.NewClientBuilder().Build() sche := client.Scheme() v1alpha1.AddToScheme(sche) - decoder, _ := admission.NewDecoder(sche) + decoder := admission.NewDecoder(sche) handler := &ElasticQuotaMutatingHandler{} handler.InjectClient(client) handler.InjectDecoder(decoder) diff --git a/pkg/webhook/elasticquota/mutating/mutating_handler.go b/pkg/webhook/elasticquota/mutating/mutating_handler.go index 95bcc9880..ad0cd086e 100644 --- a/pkg/webhook/elasticquota/mutating/mutating_handler.go +++ b/pkg/webhook/elasticquota/mutating/mutating_handler.go @@ -25,9 +25,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/webhook/elasticquota" ) @@ -81,7 +81,7 @@ func (h *ElasticQuotaMutatingHandler) Handle(ctx context.Context, request admiss return admission.PatchResponseFromRaw(request.AdmissionRequest.Object.Raw, marshaled) } -var _ inject.Client = &ElasticQuotaMutatingHandler{} +// var _ inject.Client = &ElasticQuotaMutatingHandler{} // InjectClient injects the client into the ElasticQuotaMutatingHandler func (h *ElasticQuotaMutatingHandler) InjectClient(c client.Client) error { @@ -89,7 +89,7 @@ func (h *ElasticQuotaMutatingHandler) InjectClient(c client.Client) error { return nil } -var _ admission.DecoderInjector = &ElasticQuotaMutatingHandler{} +// var _
admission.DecoderInjector = &ElasticQuotaMutatingHandler{} // InjectDecoder injects the decoder into the ElasticQuotaMutatingHandler func (h *ElasticQuotaMutatingHandler) InjectDecoder(decoder *admission.Decoder) error { diff --git a/pkg/webhook/elasticquota/mutating/webhooks.go b/pkg/webhook/elasticquota/mutating/webhooks.go index 5e1aebedf..013c51c12 100644 --- a/pkg/webhook/elasticquota/mutating/webhooks.go +++ b/pkg/webhook/elasticquota/mutating/webhooks.go @@ -17,14 +17,36 @@ limitations under the License. package mutating import ( + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/koordinator-sh/koordinator/pkg/webhook/util/framework" ) // +kubebuilder:webhook:path=/mutate-scheduling-sigs-k8s-io-v1alpha1-elasticquota,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=scheduling.sigs.k8s.io,resources=elasticquotas,verbs=create,versions=v1alpha1,name=melasticquota.koordinator.sh var ( - // HandlerMap contains admission webhook handlers - HandlerMap = map[string]admission.Handler{ - "mutate-scheduling-sigs-k8s-io-v1alpha1-elasticquota": &ElasticQuotaMutatingHandler{}, + // HandlerBuilderMap contains admission webhook handlers builders + HandlerBuilderMap = map[string]framework.HandlerBuilder{ + "mutate-scheduling-sigs-k8s-io-v1alpha1-elasticquota": &quotaMutateBuilder{}, } ) + +var _ framework.HandlerBuilder = &quotaMutateBuilder{} + +type quotaMutateBuilder struct { + mgr manager.Manager +} + +func (b *quotaMutateBuilder) WithControllerManager(mgr ctrl.Manager) framework.HandlerBuilder { + b.mgr = mgr + return b +} + +func (b *quotaMutateBuilder) Build() admission.Handler { + return &ElasticQuotaMutatingHandler{ + Client: b.mgr.GetClient(), + Decoder: admission.NewDecoder(b.mgr.GetScheme()), + } +} diff --git a/pkg/webhook/elasticquota/plugin_check_quota_meta_validate.go b/pkg/webhook/elasticquota/plugin_check_quota_meta_validate.go index 0b657d134..fe656f9a3 100644 --- a/pkg/webhook/elasticquota/plugin_check_quota_meta_validate.go +++ b/pkg/webhook/elasticquota/plugin_check_quota_meta_validate.go @@ -26,7 +26,8 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) type QuotaMetaChecker struct { diff --git a/pkg/webhook/elasticquota/plugin_check_quota_meta_validate_test.go b/pkg/webhook/elasticquota/plugin_check_quota_meta_validate_test.go index b3ff7ac11..6f6e46c94 100644 --- a/pkg/webhook/elasticquota/plugin_check_quota_meta_validate_test.go +++ b/pkg/webhook/elasticquota/plugin_check_quota_meta_validate_test.go @@ -27,7 +27,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" ) @@ -39,7 +40,7 @@ func TestQuotaMetaChecker(t *testing.T) { Group: "scheduling.sigs.k8s.io", Version: "v1alpha1", }, &v1alpha1.ElasticQuota{}, &v1alpha1.ElasticQuotaList{}) - decoder, _ := admission.NewDecoder(sche) + decoder := admission.NewDecoder(sche) plugin := NewPlugin(decoder, client) diff --git
a/pkg/webhook/elasticquota/pod_check.go b/pkg/webhook/elasticquota/pod_check.go index c88e6705e..ffcbacf1b 100644 --- a/pkg/webhook/elasticquota/pod_check.go +++ b/pkg/webhook/elasticquota/pod_check.go @@ -26,7 +26,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" "github.com/koordinator-sh/koordinator/pkg/features" diff --git a/pkg/webhook/elasticquota/quota_info.go b/pkg/webhook/elasticquota/quota_info.go index 436519536..ddb1fdfbc 100644 --- a/pkg/webhook/elasticquota/quota_info.go +++ b/pkg/webhook/elasticquota/quota_info.go @@ -18,7 +18,8 @@ package elasticquota import ( v1 "k8s.io/api/core/v1" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" ) diff --git a/pkg/webhook/elasticquota/quota_topology.go b/pkg/webhook/elasticquota/quota_topology.go index 8895dfb0c..0015a027e 100644 --- a/pkg/webhook/elasticquota/quota_topology.go +++ b/pkg/webhook/elasticquota/quota_topology.go @@ -27,7 +27,8 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" utilclient "github.com/koordinator-sh/koordinator/pkg/util/client" diff --git a/pkg/webhook/elasticquota/quota_topology_check.go b/pkg/webhook/elasticquota/quota_topology_check.go index 689021eb3..ec3bbaba9 100644 --- a/pkg/webhook/elasticquota/quota_topology_check.go +++ b/pkg/webhook/elasticquota/quota_topology_check.go @@ -27,7 +27,8 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" "github.com/koordinator-sh/koordinator/pkg/features" diff --git a/pkg/webhook/elasticquota/quota_topology_test.go b/pkg/webhook/elasticquota/quota_topology_test.go index ac6bd8576..1fc5e114c 100644 --- a/pkg/webhook/elasticquota/quota_topology_test.go +++ b/pkg/webhook/elasticquota/quota_topology_test.go @@ -29,7 +29,8 @@ import ( testing2 "k8s.io/kubernetes/pkg/scheduler/testing" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" koordfeatures "github.com/koordinator-sh/koordinator/pkg/features" @@ -505,7 +506,9 @@ func TestQuotaTopology_ValidUpdateQuota(t *testing.T) { assert.Equal(t, fmt.Sprintf("quota has children, isParent is forbidden to modify as false, quotaName:%v", quota1.Name), err.Error()) pod1 := MakePod("", "pod1").Label(extension.LabelQuotaName, "sub-1").Obj() - client := fake.NewClientBuilder().Build() + client := fake.NewClientBuilder().WithIndex(&v1.Pod{}, "label.quotaName", func(object 
client.Object) []string { + return []string{object.(*v1.Pod).Labels[extension.LabelQuotaName]} + }).Build() v1alpha1.AddToScheme(client.Scheme()) qt.client = client qt.client.Create(context.TODO(), pod1) @@ -525,7 +528,7 @@ func TestQuotaTopology_ValidUpdateQuota(t *testing.T) { assert.Equal(t, fmt.Sprintf("quota has bound pods, isParent is forbidden to modify as true, quotaName: sub-1"), err.Error()) qt.client.Delete(context.TODO(), pod2) - pod3 := MakePod("sub-2", "pod3").Obj() + pod3 := MakePod("sub-2", "pod3").Label(extension.LabelQuotaName, "sub-1").Obj() qt.client.Create(context.TODO(), pod3) sub1.Annotations[extension.AnnotationQuotaNamespaces] = "[\"namespace1\",\"namespace2\"]" @@ -575,7 +578,9 @@ func TestQuotaTopology_ListQuotaPods(t *testing.T) { func TestQuotaTopology_AnnotationNamespaces(t *testing.T) { quota := MakeQuota("temp").Annotations(map[string]string{extension.AnnotationQuotaNamespaces: "[\"test1\",\"test2\"]"}).Obj() qt := newFakeQuotaTopology() - client := fake.NewClientBuilder().Build() + client := fake.NewClientBuilder().WithIndex(&v1.Pod{}, "label.quotaName", func(object client.Object) []string { + return []string{object.(*v1.Pod).Labels["label.quotaName"]} + }).Build() v1alpha1.AddToScheme(client.Scheme()) qt.client = client @@ -619,7 +624,9 @@ func TestQuotaTopology_AnnotationNamespaces(t *testing.T) { func TestQuotaTopology_ValidDeleteQuota(t *testing.T) { qt := newFakeQuotaTopology() - client := fake.NewClientBuilder().Build() + client := fake.NewClientBuilder().WithIndex(&v1.Pod{}, "label.quotaName", func(object client.Object) []string { + return []string{object.(*v1.Pod).Labels[extension.LabelQuotaName]} + }).Build() v1alpha1.AddToScheme(client.Scheme()) qt.client = client @@ -649,26 +656,26 @@ func TestQuotaTopology_ValidDeleteQuota(t *testing.T) { assert.Equal(t, 0, len(qt.quotaHierarchyInfo["temp2"])) err = qt.ValidDeleteQuota(quota1) - assert.True(t, err == nil) + assert.NoError(t, err) assert.Equal(t, 2, len(qt.quotaInfoMap)) assert.Equal(t, 3, len(qt.quotaHierarchyInfo)) assert.Equal(t, 1, len(qt.quotaHierarchyInfo["temp"])) // add pod to quota sub-1 - pod := MakePod("sub-1", "pod1").Obj() + pod := MakePod("sub-1", "pod1").Label(extension.LabelQuotaName, "sub-1").Obj() err = qt.client.Create(context.TODO(), pod) assert.Nil(t, err) // forbidden delete quota with pods err = qt.ValidDeleteQuota(sub1) - assert.True(t, err != nil) + assert.Error(t, err) // delete pod err = qt.client.Delete(context.TODO(), pod) assert.Nil(t, err) err = qt.ValidDeleteQuota(sub1) - assert.True(t, err == nil) + assert.NoError(t, err) assert.Equal(t, 1, len(qt.quotaInfoMap)) assert.Equal(t, 2, len(qt.quotaHierarchyInfo)) assert.Equal(t, 0, len(qt.quotaHierarchyInfo["temp"])) diff --git a/pkg/webhook/elasticquota/validating/validating_handler.go b/pkg/webhook/elasticquota/validating/validating_handler.go index 8b0e93f34..8d831de6f 100644 --- a/pkg/webhook/elasticquota/validating/validating_handler.go +++ b/pkg/webhook/elasticquota/validating/validating_handler.go @@ -27,9 +27,9 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/util" "github.com/koordinator-sh/koordinator/pkg/webhook/elasticquota" 
@@ -90,7 +90,7 @@ func (h *ElasticQuotaValidatingHandler) Handle(ctx context.Context, request admi return admission.ValidationResponse(true, "") } -var _ inject.Client = &ElasticQuotaValidatingHandler{} +// var _ inject.Client = &ElasticQuotaValidatingHandler{} // InjectClient injects the client into the ElasticQuotaValidatingHandler func (h *ElasticQuotaValidatingHandler) InjectClient(c client.Client) error { @@ -98,7 +98,7 @@ func (h *ElasticQuotaValidatingHandler) InjectClient(c client.Client) error { return nil } -var _ admission.DecoderInjector = &ElasticQuotaValidatingHandler{} +// var _ admission.DecoderInjector = &ElasticQuotaValidatingHandler{} // InjectDecoder injects the client into the ElasticQuotaValidatingHandler func (h *ElasticQuotaValidatingHandler) InjectDecoder(d *admission.Decoder) error { @@ -106,7 +106,7 @@ func (h *ElasticQuotaValidatingHandler) InjectDecoder(d *admission.Decoder) erro return nil } -var _ inject.Cache = &ElasticQuotaValidatingHandler{} +// var _ inject.Cache = &ElasticQuotaValidatingHandler{} func (h *ElasticQuotaValidatingHandler) InjectCache(cache cache.Cache) error { ctx := context.TODO() @@ -121,12 +121,12 @@ func (h *ElasticQuotaValidatingHandler) InjectCache(cache cache.Cache) error { } plugin := elasticquota.NewPlugin(h.Decoder, h.Client) qt := plugin.QuotaTopo - quotaInformer.AddEventHandler(clientcache.ResourceEventHandlerFuncs{ + _, err = quotaInformer.AddEventHandler(clientcache.ResourceEventHandlerFuncs{ AddFunc: qt.OnQuotaAdd, UpdateFunc: qt.OnQuotaUpdate, DeleteFunc: qt.OnQuotaDelete, }) - return nil + return err } var _ http.Handler = &ElasticQuotaValidatingHandler{} diff --git a/pkg/webhook/elasticquota/validating/validating_handler_test.go b/pkg/webhook/elasticquota/validating/validating_handler_test.go index 108279aad..36ca1ec6e 100644 --- a/pkg/webhook/elasticquota/validating/validating_handler_test.go +++ b/pkg/webhook/elasticquota/validating/validating_handler_test.go @@ -30,16 +30,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache/informertest" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - pgfake "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned/fake" - "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + pgfake "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions" ) func makeTestHandler() *ElasticQuotaValidatingHandler { client := fake.NewClientBuilder().Build() sche := client.Scheme() v1alpha1.AddToScheme(sche) - decoder, _ := admission.NewDecoder(sche) + decoder := admission.NewDecoder(sche) handler := &ElasticQuotaValidatingHandler{} handler.InjectClient(client) handler.InjectDecoder(decoder) diff --git a/pkg/webhook/elasticquota/validating/webhooks.go b/pkg/webhook/elasticquota/validating/webhooks.go index cadc7d799..fda83ccb5 100644 --- a/pkg/webhook/elasticquota/validating/webhooks.go +++ b/pkg/webhook/elasticquota/validating/webhooks.go @@ -17,14 +17,42 @@ limitations under the License. 
package validating import ( + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/koordinator-sh/koordinator/pkg/webhook/util/framework" ) // +kubebuilder:webhook:path=/validate-scheduling-sigs-k8s-io-v1alpha1-elasticquota,mutating=false,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=scheduling.sigs.k8s.io,resources=elasticquotas,verbs=create;update;delete,versions=v1alpha1,name=velasticquota.koordinator.sh var ( - // HandlerMap contains admission webhook handlers - HandlerMap = map[string]admission.Handler{ - "validate-scheduling-sigs-k8s-io-v1alpha1-elasticquota": &ElasticQuotaValidatingHandler{}, + // HandlerBuilderMap contains admission webhook handlers builder + HandlerBuilderMap = map[string]framework.HandlerBuilder{ + "validate-scheduling-sigs-k8s-io-v1alpha1-elasticquota": &quotaValidateBuilder{}, } ) + +var _ framework.HandlerBuilder = &quotaValidateBuilder{} + +type quotaValidateBuilder struct { + mgr manager.Manager +} + +func (b *quotaValidateBuilder) WithControllerManager(mgr ctrl.Manager) framework.HandlerBuilder { + b.mgr = mgr + return b +} + +func (b *quotaValidateBuilder) Build() admission.Handler { + h := &ElasticQuotaValidatingHandler{ + Client: b.mgr.GetClient(), + Decoder: admission.NewDecoder(b.mgr.GetScheme()), + } + err := h.InjectCache(b.mgr.GetCache()) + if err != nil { + klog.Fatalf("failed to inject cache for quotaValidateBuilder: %v", err) + } + return h +} diff --git a/pkg/webhook/node/mutating/mutating_handler.go b/pkg/webhook/node/mutating/mutating_handler.go index 2e40846e6..c73d137a6 100644 --- a/pkg/webhook/node/mutating/mutating_handler.go +++ b/pkg/webhook/node/mutating/mutating_handler.go @@ -26,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/koordinator-sh/koordinator/pkg/webhook/node/plugins" @@ -69,10 +68,12 @@ func shouldIgnoreIfNotNode(req admission.Request) bool { } */ -// NewNodeMutatingHandler creates a new handler for node/status. +// NewNodeStatusMutatingHandler creates a new handler for node/status.
+func NewNodeStatusMutatingHandler(c client.Client, d *admission.Decoder) *NodeMutatingHandler { handler := &NodeMutatingHandler{ ignoreFilter: shouldIgnoreIfNotNodeStatus, + Client: c, + Decoder: d, } return handler } @@ -138,7 +139,7 @@ func (h *NodeMutatingHandler) Handle(ctx context.Context, req admission.Request) return admission.PatchResponseFromRaw(original, marshaled) } -var _ inject.Client = &NodeMutatingHandler{} +// var _ inject.Client = &NodeMutatingHandler{} // InjectClient injects the client into the PodMutatingHandler func (n *NodeMutatingHandler) InjectClient(c client.Client) error { @@ -146,7 +147,7 @@ func (n *NodeMutatingHandler) InjectClient(c client.Client) error { return nil } -var _ admission.DecoderInjector = &NodeMutatingHandler{} +// var _ admission.DecoderInjector = &NodeMutatingHandler{} // InjectDecoder injects the decoder into the PodMutatingHandler func (n *NodeMutatingHandler) InjectDecoder(d *admission.Decoder) error { diff --git a/pkg/webhook/node/mutating/mutating_handler_test.go b/pkg/webhook/node/mutating/mutating_handler_test.go index 30774a6a6..a8d341e36 100644 --- a/pkg/webhook/node/mutating/mutating_handler_test.go +++ b/pkg/webhook/node/mutating/mutating_handler_test.go @@ -59,10 +59,8 @@ func (n *mockNodePlugin) Admit(ctx context.Context, req admission.Request, node, } func TestNodeMutatingHandler_Handle(t *testing.T) { - decoder, _ := admission.NewDecoder(scheme.Scheme) - handler := NewNodeStatusMutatingHandler() - handler.InjectDecoder(decoder) - handler.InjectClient(fake.NewClientBuilder().Build()) + decoder := admission.NewDecoder(scheme.Scheme) + handler := NewNodeStatusMutatingHandler(fake.NewClientBuilder().Build(), decoder) mockRequest := admission.Request{ AdmissionRequest: admissionv1.AdmissionRequest{ diff --git a/pkg/webhook/node/mutating/webhooks.go b/pkg/webhook/node/mutating/webhooks.go index d52ae7b48..f8f9cfb31 100644 --- a/pkg/webhook/node/mutating/webhooks.go +++ b/pkg/webhook/node/mutating/webhooks.go @@ -17,14 +17,33 @@ limitations under the License. 
package mutating import ( + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/koordinator-sh/koordinator/pkg/webhook/util/framework" ) // +kubebuilder:webhook:path=/mutate-node-status,mutating=true,failurePolicy=ignore,sideEffects=None,groups="",resources=nodes/status,verbs=create;update,versions=v1,name=mnode-status.koordinator.sh,admissionReviewVersions=v1;v1beta1 var ( - // HandlerMap contains admission webhook handlers - HandlerMap = map[string]admission.Handler{ - "mutate-node-status": NewNodeStatusMutatingHandler(), + // HandlerBuilderMap contains admission webhook handlers builder + HandlerBuilderMap = map[string]framework.HandlerBuilder{ + "mutate-node-status": &nodeMutateBuilder{}, } ) + +var _ framework.HandlerBuilder = &nodeMutateBuilder{} + +type nodeMutateBuilder struct { + mgr manager.Manager +} + +func (b *nodeMutateBuilder) WithControllerManager(mgr ctrl.Manager) framework.HandlerBuilder { + b.mgr = mgr + return b +} + +func (b *nodeMutateBuilder) Build() admission.Handler { + return NewNodeStatusMutatingHandler(b.mgr.GetClient(), admission.NewDecoder(b.mgr.GetScheme())) +} diff --git a/pkg/webhook/node/validating/validating_handler.go b/pkg/webhook/node/validating/validating_handler.go index e627f89d8..618057aa1 100644 --- a/pkg/webhook/node/validating/validating_handler.go +++ b/pkg/webhook/node/validating/validating_handler.go @@ -24,7 +24,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/koordinator-sh/koordinator/pkg/util" @@ -41,8 +40,11 @@ type NodeValidatingHandler struct { Decoder *admission.Decoder } -func NewNodeValidatingHandler() *NodeValidatingHandler { - handler := &NodeValidatingHandler{} +func NewNodeValidatingHandler(c client.Client, d *admission.Decoder) *NodeValidatingHandler { + handler := &NodeValidatingHandler{ + Client: c, + Decoder: d, + } return handler } @@ -108,7 +110,7 @@ func (h *NodeValidatingHandler) getPlugins() []plugins.NodePlugin { return []plugins.NodePlugin{nodesloconfig.NewPlugin(h.Decoder, h.Client)} } -var _ inject.Client = &NodeValidatingHandler{} +// var _ inject.Client = &NodeValidatingHandler{} // InjectClient injects the client into the ValidatingHandler func (h *NodeValidatingHandler) InjectClient(c client.Client) error { @@ -116,7 +118,7 @@ func (h *NodeValidatingHandler) InjectClient(c client.Client) error { return nil } -var _ admission.DecoderInjector = &NodeValidatingHandler{} +// var _ admission.DecoderInjector = &NodeValidatingHandler{} // InjectDecoder injects the decoder into the ValidatingHandler func (h *NodeValidatingHandler) InjectDecoder(d *admission.Decoder) error { diff --git a/pkg/webhook/node/validating/validating_handler_test.go b/pkg/webhook/node/validating/validating_handler_test.go index 2744ff102..6ae55ee5b 100644 --- a/pkg/webhook/node/validating/validating_handler_test.go +++ b/pkg/webhook/node/validating/validating_handler_test.go @@ -32,10 +32,8 @@ import ( func makeTestHandler() *NodeValidatingHandler { client := fake.NewClientBuilder().Build() sche := client.Scheme() - decoder, _ := admission.NewDecoder(sche) - handler := NewNodeValidatingHandler() - handler.InjectClient(client) - handler.InjectDecoder(decoder) + decoder := admission.NewDecoder(sche) + handler := NewNodeValidatingHandler(client, decoder) return handler } 
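Every webhooks.go touched in this patch follows the same shape: the eager HandlerMap of admission.Handler values becomes a HandlerBuilderMap of framework.HandlerBuilder values, and each builder constructs its handler only once the controller manager is available to supply the client, the scheme-based decoder and, for the quota validator, the cache. The sketch below is only an illustration of that pattern and is not part of the change; fooValidateBuilder and FooValidatingHandler are made-up names standing in for the real cmBuilder, nodeValidateBuilder, podValidateBuilder and their handlers.

package validating

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

	"github.com/koordinator-sh/koordinator/pkg/webhook/util/framework"
)

// FooValidatingHandler is a hypothetical stand-in for the real handlers in this
// patch; it simply allows every request.
type FooValidatingHandler struct {
	Client  client.Client
	Decoder *admission.Decoder
}

func (h *FooValidatingHandler) Handle(ctx context.Context, req admission.Request) admission.Response {
	return admission.Allowed("")
}

// fooValidateBuilder mirrors the builders introduced above.
type fooValidateBuilder struct {
	mgr manager.Manager
}

var _ framework.HandlerBuilder = &fooValidateBuilder{}

// WithControllerManager records the manager so Build can take the client and
// scheme from it.
func (b *fooValidateBuilder) WithControllerManager(mgr ctrl.Manager) framework.HandlerBuilder {
	b.mgr = mgr
	return b
}

// Build constructs the handler with its dependencies set up front, replacing
// the removed InjectClient/InjectDecoder hooks.
func (b *fooValidateBuilder) Build() admission.Handler {
	return &FooValidatingHandler{
		Client:  b.mgr.GetClient(),
		Decoder: admission.NewDecoder(b.mgr.GetScheme()),
	}
}

SetupWithManager in pkg/webhook/server.go, later in this diff, iterates HandlerBuilderMap, calls WithControllerManager(mgr).Build() for each path and registers the result, which is why the injection plumbing could be dropped.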
diff --git a/pkg/webhook/node/validating/webhooks.go b/pkg/webhook/node/validating/webhooks.go index 529f3a7cc..6f652858a 100644 --- a/pkg/webhook/node/validating/webhooks.go +++ b/pkg/webhook/node/validating/webhooks.go @@ -17,14 +17,33 @@ limitations under the License. package validating import ( + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/koordinator-sh/koordinator/pkg/webhook/util/framework" ) // +kubebuilder:webhook:path=/validate-node,mutating=false,failurePolicy=ignore,sideEffects=None,groups="",resources=nodes,verbs=create;update,versions=v1,name=vnode.koordinator.sh,admissionReviewVersions=v1;v1beta1 var ( - // HandlerMap contains admission webhook handlers - HandlerMap = map[string]admission.Handler{ - "validate-node": NewNodeValidatingHandler(), + // HandlerBuilderMap contains admission webhook handlers builder + HandlerBuilderMap = map[string]framework.HandlerBuilder{ + "validate-node": &nodeValidateBuilder{}, } ) + +var _ framework.HandlerBuilder = &nodeValidateBuilder{} + +type nodeValidateBuilder struct { + mgr manager.Manager +} + +func (b *nodeValidateBuilder) WithControllerManager(mgr ctrl.Manager) framework.HandlerBuilder { + b.mgr = mgr + return b +} + +func (b *nodeValidateBuilder) Build() admission.Handler { + return NewNodeValidatingHandler(b.mgr.GetClient(), admission.NewDecoder(b.mgr.GetScheme())) +} diff --git a/pkg/webhook/pod/mutating/cluster_colocation_profile_test.go b/pkg/webhook/pod/mutating/cluster_colocation_profile_test.go index b17a76572..154b44d9b 100644 --- a/pkg/webhook/pod/mutating/cluster_colocation_profile_test.go +++ b/pkg/webhook/pod/mutating/cluster_colocation_profile_test.go @@ -1660,7 +1660,7 @@ func TestClusterColocationProfileMutatingPod(t *testing.T) { assert := assert.New(t) client := fake.NewClientBuilder().Build() - decoder, _ := admission.NewDecoder(scheme.Scheme) + decoder := admission.NewDecoder(scheme.Scheme) handler := &PodMutatingHandler{ Client: client, Decoder: decoder, diff --git a/pkg/webhook/pod/mutating/extended_resource_spec_test.go b/pkg/webhook/pod/mutating/extended_resource_spec_test.go index d32bceea0..dfe2779e2 100644 --- a/pkg/webhook/pod/mutating/extended_resource_spec_test.go +++ b/pkg/webhook/pod/mutating/extended_resource_spec_test.go @@ -39,7 +39,7 @@ func TestExtendedResourceSpecMutatingPod(t *testing.T) { assert := assert.New(t) client := fake.NewClientBuilder().Build() - decoder, _ := admission.NewDecoder(scheme.Scheme) + decoder := admission.NewDecoder(scheme.Scheme) handler := &PodMutatingHandler{ Client: client, Decoder: decoder, diff --git a/pkg/webhook/pod/mutating/multi_quota_tree_affinity_test.go b/pkg/webhook/pod/mutating/multi_quota_tree_affinity_test.go index 081183c7e..30aecf20e 100644 --- a/pkg/webhook/pod/mutating/multi_quota_tree_affinity_test.go +++ b/pkg/webhook/pod/mutating/multi_quota_tree_affinity_test.go @@ -27,7 +27,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" - schedulingv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + schedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/apis/extension" quotav1alpha1 "github.com/koordinator-sh/koordinator/apis/quota/v1alpha1" @@ -42,7 +43,7 @@ func init() { func TestAddNodeAffinityForMultiQuotaTree(t *testing.T) { handler, fakeInformers := 
makeTestHandler() - quotaInformer, err := fakeInformers.FakeInformerFor(&schedulingv1alpha1.ElasticQuota{}) + quotaInformer, err := fakeInformers.FakeInformerFor(context.TODO(), &schedulingv1alpha1.ElasticQuota{}) assert.NoError(t, err) profiles := []*quotav1alpha1.ElasticQuotaProfile{ diff --git a/pkg/webhook/pod/mutating/mutating_handler.go b/pkg/webhook/pod/mutating/mutating_handler.go index 539c3bd1f..93900639a 100644 --- a/pkg/webhook/pod/mutating/mutating_handler.go +++ b/pkg/webhook/pod/mutating/mutating_handler.go @@ -26,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -125,7 +124,7 @@ func (h *PodMutatingHandler) handleUpdate(ctx context.Context, req admission.Req return nil } -var _ inject.Client = &PodMutatingHandler{} +// var _ inject.Client = &PodMutatingHandler{} // InjectClient injects the client into the PodMutatingHandler func (h *PodMutatingHandler) InjectClient(c client.Client) error { @@ -133,7 +132,7 @@ func (h *PodMutatingHandler) InjectClient(c client.Client) error { return nil } -var _ admission.DecoderInjector = &PodMutatingHandler{} +// var _ admission.DecoderInjector = &PodMutatingHandler{} // InjectDecoder injects the decoder into the PodMutatingHandler func (h *PodMutatingHandler) InjectDecoder(d *admission.Decoder) error { diff --git a/pkg/webhook/pod/mutating/mutating_handler_test.go b/pkg/webhook/pod/mutating/mutating_handler_test.go index 0e93bc9a1..7e25698df 100644 --- a/pkg/webhook/pod/mutating/mutating_handler_test.go +++ b/pkg/webhook/pod/mutating/mutating_handler_test.go @@ -30,9 +30,9 @@ import ( sigcache "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/cache/informertest" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/webhook/elasticquota" ) @@ -44,7 +44,7 @@ func makeTestHandler() (*PodMutatingHandler, *informertest.FakeInformers) { Group: "scheduling.sigs.k8s.io", Version: "v1alpha1", }, &v1alpha1.ElasticQuota{}, &v1alpha1.ElasticQuotaList{}) - decoder, _ := admission.NewDecoder(sche) + decoder := admission.NewDecoder(sche) handler := &PodMutatingHandler{} handler.InjectClient(client) handler.InjectDecoder(decoder) @@ -137,7 +137,7 @@ func TestMutatingHandler(t *testing.T) { } } -var _ inject.Cache = &PodMutatingHandler{} +// var _ inject.Cache = &PodMutatingHandler{} func (h *PodMutatingHandler) InjectCache(cache sigcache.Cache) error { ctx := context.TODO() diff --git a/pkg/webhook/pod/mutating/webhooks.go b/pkg/webhook/pod/mutating/webhooks.go index 772a6fe32..6c89f682a 100644 --- a/pkg/webhook/pod/mutating/webhooks.go +++ b/pkg/webhook/pod/mutating/webhooks.go @@ -17,15 +17,37 @@ limitations under the License. 
package mutating import ( + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/koordinator-sh/koordinator/pkg/webhook/util/framework" ) // +kubebuilder:webhook:path=/mutate-pod,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups="",resources=pods,verbs=create,versions=v1,name=mpod-create.koordinator.sh // +kubebuilder:webhook:path=/mutate-pod,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups="",resources=pods,verbs=update,versions=v1,name=mpod-update.koordinator.sh var ( - // HandlerMap contains admission webhook handlers - HandlerMap = map[string]admission.Handler{ - "mutate-pod": &PodMutatingHandler{}, + // HandlerBuilderMap contains admission webhook handlers builder + HandlerBuilderMap = map[string]framework.HandlerBuilder{ + "mutate-pod": &podMutateBuilder{}, } ) + +var _ framework.HandlerBuilder = &podMutateBuilder{} + +type podMutateBuilder struct { + mgr manager.Manager +} + +func (b *podMutateBuilder) WithControllerManager(mgr ctrl.Manager) framework.HandlerBuilder { + b.mgr = mgr + return b +} + +func (b *podMutateBuilder) Build() admission.Handler { + return &PodMutatingHandler{ + Client: b.mgr.GetClient(), + Decoder: admission.NewDecoder(b.mgr.GetScheme()), + } +} diff --git a/pkg/webhook/pod/validating/cluster_colocation_profile_test.go b/pkg/webhook/pod/validating/cluster_colocation_profile_test.go index 843dbdf6f..dc3221cdd 100644 --- a/pkg/webhook/pod/validating/cluster_colocation_profile_test.go +++ b/pkg/webhook/pod/validating/cluster_colocation_profile_test.go @@ -448,7 +448,7 @@ func TestClusterColocationProfileValidatingPod(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := fake.NewClientBuilder().Build() - decoder, _ := admission.NewDecoder(scheme.Scheme) + decoder := admission.NewDecoder(scheme.Scheme) h := &PodValidatingHandler{ Client: client, Decoder: decoder, diff --git a/pkg/webhook/pod/validating/validating_handler.go b/pkg/webhook/pod/validating/validating_handler.go index 9ac8a252e..de2c3d88b 100644 --- a/pkg/webhook/pod/validating/validating_handler.go +++ b/pkg/webhook/pod/validating/validating_handler.go @@ -23,7 +23,6 @@ import ( admissionv1 "k8s.io/api/admission/v1" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/koordinator-sh/koordinator/pkg/webhook/elasticquota" @@ -83,7 +82,7 @@ func (h *PodValidatingHandler) Handle(ctx context.Context, req admission.Request return admission.ValidationResponse(allowed, reason) } -var _ inject.Client = &PodValidatingHandler{} +// var _ inject.Client = &PodValidatingHandler{} // InjectClient injects the client into the PodValidatingHandler func (h *PodValidatingHandler) InjectClient(c client.Client) error { @@ -91,7 +90,7 @@ func (h *PodValidatingHandler) InjectClient(c client.Client) error { return nil } -var _ admission.DecoderInjector = &PodValidatingHandler{} +// var _ admission.DecoderInjector = &PodValidatingHandler{} // InjectDecoder injects the decoder into the PodValidatingHandler func (h *PodValidatingHandler) InjectDecoder(d *admission.Decoder) error { diff --git a/pkg/webhook/pod/validating/validating_handler_test.go b/pkg/webhook/pod/validating/validating_handler_test.go index b2d5f1e43..d8fed0e14 100644 --- 
a/pkg/webhook/pod/validating/validating_handler_test.go +++ b/pkg/webhook/pod/validating/validating_handler_test.go @@ -29,11 +29,11 @@ import ( sigcache "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/cache/informertest" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" - pgfake "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned/fake" - "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions" + + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" + pgfake "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned/fake" + "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/informers/externalversions" "github.com/koordinator-sh/koordinator/pkg/webhook/elasticquota" ) @@ -45,7 +45,7 @@ func makeTestHandler() *PodValidatingHandler { Group: "scheduling.sigs.k8s.io", Version: "v1alpha1", }, &v1alpha1.ElasticQuota{}, &v1alpha1.ElasticQuotaList{}) - decoder, _ := admission.NewDecoder(sche) + decoder := admission.NewDecoder(sche) handler := &PodValidatingHandler{} handler.InjectClient(client) handler.InjectDecoder(decoder) @@ -138,7 +138,7 @@ func TestValidatingHandler(t *testing.T) { } } -var _ inject.Cache = &PodValidatingHandler{} +// var _ inject.Cache = &PodValidatingHandler{} func (h *PodValidatingHandler) InjectCache(cache sigcache.Cache) error { ctx := context.TODO() diff --git a/pkg/webhook/pod/validating/verify_annotations_test.go b/pkg/webhook/pod/validating/verify_annotations_test.go index 7c73e32d8..7c18ce69d 100644 --- a/pkg/webhook/pod/validating/verify_annotations_test.go +++ b/pkg/webhook/pod/validating/verify_annotations_test.go @@ -67,7 +67,7 @@ func TestClusterReservationValidatingPod(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { client := fake.NewClientBuilder().Build() - decoder, _ := admission.NewDecoder(scheme.Scheme) + decoder := admission.NewDecoder(scheme.Scheme) h := &PodValidatingHandler{ Client: client, Decoder: decoder, diff --git a/pkg/webhook/pod/validating/webhooks.go b/pkg/webhook/pod/validating/webhooks.go index 92a80c069..c38a1eecd 100644 --- a/pkg/webhook/pod/validating/webhooks.go +++ b/pkg/webhook/pod/validating/webhooks.go @@ -17,14 +17,36 @@ limitations under the License. 
package validating import ( + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + "github.com/koordinator-sh/koordinator/pkg/webhook/util/framework" ) // +kubebuilder:webhook:path=/validate-pod,mutating=false,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups="",resources=pods,verbs=create;update,versions=v1,name=vpod.koordinator.sh var ( // HandlerMap contains admission webhook handlers - HandlerMap = map[string]admission.Handler{ - "validate-pod": &PodValidatingHandler{}, + HandlerBuilderMap = map[string]framework.HandlerBuilder{ + "validate-pod": &podValidateBuilder{}, } ) + +var _ framework.HandlerBuilder = &podValidateBuilder{} + +type podValidateBuilder struct { + mgr manager.Manager +} + +func (b *podValidateBuilder) WithControllerManager(mgr ctrl.Manager) framework.HandlerBuilder { + b.mgr = mgr + return b +} + +func (b *podValidateBuilder) Build() admission.Handler { + return &PodValidatingHandler{ + Client: b.mgr.GetClient(), + Decoder: admission.NewDecoder(b.mgr.GetScheme()), + } +} diff --git a/pkg/webhook/server.go b/pkg/webhook/server.go index 5ffc1b7c5..01a1e94c3 100644 --- a/pkg/webhook/server.go +++ b/pkg/webhook/server.go @@ -32,19 +32,21 @@ import ( webhookutil "github.com/koordinator-sh/koordinator/pkg/webhook/util" webhookcontroller "github.com/koordinator-sh/koordinator/pkg/webhook/util/controller" + "github.com/koordinator-sh/koordinator/pkg/webhook/util/framework" "github.com/koordinator-sh/koordinator/pkg/webhook/util/health" ) type GateFunc func() (enabled bool) var ( - // HandlerMap contains all admission webhook handlers. - HandlerMap = map[string]admission.Handler{} - handlerGates = map[string]GateFunc{} + // handlerMap contains all admission webhook handlers. 
+ handlerMap = map[string]admission.Handler{} + handlerGates = map[string]GateFunc{} + HandlerBuilderMap = map[string]framework.HandlerBuilder{} ) -func addHandlersWithGate(m map[string]admission.Handler, fn GateFunc) { - for path, handler := range m { +func addHandlersWithGate(m map[string]framework.HandlerBuilder, fn GateFunc) { + for path, handlerBuilder := range m { if len(path) == 0 { klog.Warningf("Skip handler with empty path.") continue @@ -52,11 +54,11 @@ func addHandlersWithGate(m map[string]admission.Handler, fn GateFunc) { if path[0] != '/' { path = "/" + path } - _, found := HandlerMap[path] + _, found := HandlerBuilderMap[path] if found { klog.V(1).Infof("conflicting webhook builder path %v in handler map", path) } - HandlerMap[path] = handler + HandlerBuilderMap[path] = handlerBuilder if fn != nil { handlerGates[path] = fn } @@ -65,7 +67,7 @@ func addHandlersWithGate(m map[string]admission.Handler, fn GateFunc) { func filterActiveHandlers() { disablePaths := sets.NewString() - for path := range HandlerMap { + for path := range HandlerBuilderMap { if fn, ok := handlerGates[path]; ok { if !fn() { disablePaths.Insert(path) @@ -73,25 +75,32 @@ func filterActiveHandlers() { } } for _, path := range disablePaths.List() { - delete(HandlerMap, path) + delete(HandlerBuilderMap, path) } } +func SetupWithWebhookOpt(opt *manager.Options) { + opt.WebhookServer = webhook.NewServer(webhook.Options{ + Host: "0.0.0.0", + Port: webhookutil.GetPort(), + CertDir: webhookutil.GetCertDir(), + }) +} + func SetupWithManager(mgr manager.Manager) error { server := mgr.GetWebhookServer() - server.Host = "0.0.0.0" - server.Port = webhookutil.GetPort() - server.CertDir = webhookutil.GetCertDir() // register admission handlers filterActiveHandlers() - for path, handler := range HandlerMap { + for path, handlerBuilder := range HandlerBuilderMap { + handler := handlerBuilder.WithControllerManager(mgr).Build() server.Register(path, &webhook.Admission{Handler: handler}) + handlerMap[path] = handler klog.V(3).Infof("Registered webhook handler %s", path) } // register conversion webhook - server.Register("/convert", &conversion.Webhook{}) + server.Register("/convert", conversion.NewWebhookHandler(mgr.GetScheme())) // register health handler server.Register("/healthz", &health.Handler{}) @@ -106,7 +115,7 @@ func SetupWithManager(mgr manager.Manager) error { // +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=validatingwebhookconfigurations,verbs=get;list;watch;update;patch func Initialize(ctx context.Context, cfg *rest.Config) error { - c, err := webhookcontroller.New(cfg, HandlerMap) + c, err := webhookcontroller.New(cfg, handlerMap) if err != nil { return err } diff --git a/pkg/webhook/util/framework/builder.go b/pkg/webhook/util/framework/builder.go new file mode 100644 index 000000000..37f92f486 --- /dev/null +++ b/pkg/webhook/util/framework/builder.go @@ -0,0 +1,27 @@ +/* +Copyright 2022 The Koordinator Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package framework + +import ( + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +type HandlerBuilder interface { + WithControllerManager(mgr ctrl.Manager) HandlerBuilder + Build() admission.Handler +} diff --git a/test/e2e/framework/config/config.go b/test/e2e/framework/config/config.go index 841d67d06..907690e94 100644 --- a/test/e2e/framework/config/config.go +++ b/test/e2e/framework/config/config.go @@ -23,13 +23,13 @@ limitations under the License. // The command line flags all get stored in a private flag set. The // developer of the E2E test suite decides how they are exposed. Options // include: -// - exposing as normal flags in the actual command line: -// CopyFlags(Flags, flag.CommandLine) -// - populate via test/e2e/framework/viperconfig: -// viperconfig.ViperizeFlags("my-config.yaml", "", Flags) -// - a combination of both: -// CopyFlags(Flags, flag.CommandLine) -// viperconfig.ViperizeFlags("my-config.yaml", "", flag.CommandLine) +// - exposing as normal flags in the actual command line: +// CopyFlags(Flags, flag.CommandLine) +// - populate via test/e2e/framework/viperconfig: +// viperconfig.ViperizeFlags("my-config.yaml", "", Flags) +// - a combination of both: +// CopyFlags(Flags, flag.CommandLine) +// viperconfig.ViperizeFlags("my-config.yaml", "", flag.CommandLine) // // Instead of defining flags one-by-one, test developers annotate a // structure with tags and then call a single function. This is the @@ -39,16 +39,16 @@ limitations under the License. // // For example, a file storage/csi.go might define: // -// var scaling struct { -// NumNodes int `default:"1" description:"number of nodes to run on"` -// Master string -// } -// _ = config.AddOptions(&scaling, "storage.csi.scaling") +// var scaling struct { +// NumNodes int `default:"1" description:"number of nodes to run on"` +// Master string +// } +// _ = config.AddOptions(&scaling, "storage.csi.scaling") // // This defines the following command line flags: // -// -storage.csi.scaling.numNodes= - number of nodes to run on (default: 1) -// -storage.csi.scaling.master= +// -storage.csi.scaling.numNodes= - number of nodes to run on (default: 1) +// -storage.csi.scaling.master= // // All fields in the structure must be exported and have one of the following // types (same as in the `flag` package): @@ -64,10 +64,10 @@ limitations under the License. // // Each basic entry may have a tag with these optional keys: // -// usage: additional explanation of the option -// default: the default value, in the same format as it would -// be given on the command line and true/false for -// a boolean +// usage: additional explanation of the option +// default: the default value, in the same format as it would +// be given on the command line and true/false for +// a boolean // // The names of the final configuration options are a combination of an // optional common prefix for all options in the structure and the diff --git a/test/e2e/framework/log.go b/test/e2e/framework/log.go index 7033ee62c..0d7f6a8cf 100644 --- a/test/e2e/framework/log.go +++ b/test/e2e/framework/log.go @@ -72,10 +72,10 @@ var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/`) // entries coming from Ginkgo. 
// // This is a modified copy of PruneStack in https://github.com/onsi/ginkgo/blob/f90f37d87fa6b1dd9625e2b1e83c23ffae3de228/internal/codelocation/code_location.go#L25: -// - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter) -// - source code filtering updated to be specific to Kubernetes -// - optimized to use bytes and in-place slice filtering from -// https://github.com/golang/go/wiki/SliceTricks#filter-in-place +// - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter) +// - source code filtering updated to be specific to Kubernetes +// - optimized to use bytes and in-place slice filtering from +// https://github.com/golang/go/wiki/SliceTricks#filter-in-place func PrunedStack(skip int) []byte { fullStackTrace := debug.Stack() stack := bytes.Split(fullStackTrace, []byte("\n")) diff --git a/test/e2e/framework/log_test.go b/test/e2e/framework/log_test.go index 25b41fa99..d084efb20 100644 --- a/test/e2e/framework/log_test.go +++ b/test/e2e/framework/log_test.go @@ -183,10 +183,15 @@ var functionArgs = regexp.MustCompile(`([[:alpha:]]+)\(.*\)`) // testFailureOutput matches TestFailureOutput() and its source followed by additional stack entries: // // github.com/koordinator-sh/koordinator/test/e2e/framework_test.TestFailureOutput(0xc000558800) +// // /nvme/gopath/src/github.com/koordinator-sh/koordinator/test/e2e/framework/log/log_test.go:73 +0x1c9 +// // testing.tRunner(0xc000558800, 0x1af2848) -// /nvme/gopath/go/src/testing/testing.go:865 +0xc0 +// +// /nvme/gopath/go/src/testing/testing.go:865 +0xc0 +// // created by testing.(*T).Run +// // /nvme/gopath/go/src/testing/testing.go:916 +0x35a var testFailureOutput = regexp.MustCompile(`(?m)^github.com/koordinator-sh/koordinator/test/e2e/framework_test\.TestFailureOutput\(.*\n\t.*(\n.*\n\t.*)*`) diff --git a/test/e2e/framework/node/wait_test.go b/test/e2e/framework/node/wait_test.go index 174d55016..4c05d9ef4 100644 --- a/test/e2e/framework/node/wait_test.go +++ b/test/e2e/framework/node/wait_test.go @@ -169,6 +169,9 @@ func TestCheckReadyForTests(t *testing.T) { }) checkFunc := CheckReadyForTests(c, tc.nonblockingTaints, tc.allowedNotReadyNodes, testLargeClusterThreshold) out, err := checkFunc() + for i := 0; i < 3; i++ { + out, err = checkFunc() + } if out != tc.expected { t.Errorf("Expected %v but got %v", tc.expected, out) } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 38268f548..f44a398bf 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -1345,18 +1345,22 @@ func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool { // WatchEventSequenceVerifier ... 
// manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure -// testContext cancelation signal across API boundries, e.g: context.TODO() -// dc sets up a client to the API -// resourceType specify the type of resource -// namespace select a namespace -// resourceName the name of the given resource -// listOptions options used to find the resource, recommended to use listOptions.labelSelector -// expectedWatchEvents array of events which are expected to occur -// scenario the test itself -// retryCleanup a function to run which ensures that there are no dangling resources upon test failure +// +// testContext cancelation signal across API boundries, e.g: context.TODO() +// dc sets up a client to the API +// resourceType specify the type of resource +// namespace select a namespace +// resourceName the name of the given resource +// listOptions options used to find the resource, recommended to use listOptions.labelSelector +// expectedWatchEvents array of events which are expected to occur +// scenario the test itself +// retryCleanup a function to run which ensures that there are no dangling resources upon test failure +// // this tooling relies on the test to return the events as they occur // the entire scenario must be run to ensure that the desired watch events arrive in order (allowing for interweaving of watch events) -// if an expected watch event is missing we elect to clean up and run the entire scenario again +// +// if an expected watch event is missing we elect to clean up and run the entire scenario again +// // we try the scenario three times to allow the sequencing to fail a couple of times func WatchEventSequenceVerifier(ctx context.Context, dc dynamic.Interface, resourceType schema.GroupVersionResource, namespace string, resourceName string, listOptions metav1.ListOptions, expectedWatchEvents []watch.Event, scenario func(*watchtools.RetryWatcher) []watch.Event, retryCleanup func() error) { listWatcher := &cache.ListWatch{ diff --git a/test/e2e/generated/bindata.go b/test/e2e/generated/bindata.go index abd23f2d9..33934f512 100644 --- a/test/e2e/generated/bindata.go +++ b/test/e2e/generated/bindata.go @@ -8604,11 +8604,13 @@ var _bindata = map[string]func() (*asset, error){ // directory embedded in the file by go-bindata. // For example if you run go-bindata on data/... 
and data contains the // following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png +// +// data/ +// foo.txt +// img/ +// a.png +// b.png +// // then AssetDir("data") would return []string{"foo.txt", "img"} // AssetDir("data/img") would return []string{"a.png", "b.png"} // AssetDir("foo.txt") and AssetDir("notexist") would return an error diff --git a/test/e2e/quota/multi_tree.go b/test/e2e/quota/multi_tree.go index 3adcf8c87..65c51d800 100644 --- a/test/e2e/quota/multi_tree.go +++ b/test/e2e/quota/multi_tree.go @@ -27,15 +27,16 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" quotav1 "k8s.io/apiserver/pkg/quota/v1" - schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned" + + schedclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" "github.com/koordinator-sh/koordinator/apis/extension" quotav1alpha1 "github.com/koordinator-sh/koordinator/apis/quota/v1alpha1" + schedv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/util" "github.com/koordinator-sh/koordinator/test/e2e/framework" e2enode "github.com/koordinator-sh/koordinator/test/e2e/framework/node" imageutils "github.com/koordinator-sh/koordinator/test/utils/image" - schedv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) var QuotaE2eLabel = "koord-quota-e2e" diff --git a/test/e2e/quota/quota.go b/test/e2e/quota/quota.go index 215cbfad8..e0bf0db07 100644 --- a/test/e2e/quota/quota.go +++ b/test/e2e/quota/quota.go @@ -26,14 +26,15 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned" + + schedclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" "github.com/koordinator-sh/koordinator/apis/extension" + schedv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/util" "github.com/koordinator-sh/koordinator/test/e2e/framework" e2enode "github.com/koordinator-sh/koordinator/test/e2e/framework/node" e2epod "github.com/koordinator-sh/koordinator/test/e2e/framework/pod" - schedv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) var _ = SIGDescribe("basic-quota", func() { diff --git a/test/e2e/quota/quota_guaranteed.go b/test/e2e/quota/quota_guaranteed.go index f5c1247e9..229aefeab 100644 --- a/test/e2e/quota/quota_guaranteed.go +++ b/test/e2e/quota/quota_guaranteed.go @@ -29,14 +29,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" quotav1 "k8s.io/apiserver/pkg/quota/v1" apiv1 "k8s.io/kubernetes/pkg/api/v1/pod" - schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned" + + schedclientset "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/generated/clientset/versioned" "github.com/koordinator-sh/koordinator/apis/extension" + schedv1alpha1 "github.com/koordinator-sh/koordinator/apis/thirdparty/scheduler-plugins/pkg/apis/scheduling/v1alpha1" "github.com/koordinator-sh/koordinator/pkg/util" "github.com/koordinator-sh/koordinator/test/e2e/framework" e2enode "github.com/koordinator-sh/koordinator/test/e2e/framework/node" e2epod 
"github.com/koordinator-sh/koordinator/test/e2e/framework/pod" - schedv1alpha1 "sigs.k8s.io/scheduler-plugins/pkg/apis/scheduling/v1alpha1" ) var _ = SIGDescribe("quota-guaranteed", func() { diff --git a/test/e2e/scheduling/deviceshare.go b/test/e2e/scheduling/deviceshare.go index 9f32f1ff4..468b4ed61 100644 --- a/test/e2e/scheduling/deviceshare.go +++ b/test/e2e/scheduling/deviceshare.go @@ -158,7 +158,7 @@ var _ = SIGDescribe("DeviceShare", func() { reservationRequests := reservationutil.ReservationRequests(reservation) gomega.Expect(reservation.Status.Allocatable).Should(gomega.Equal(reservationRequests)) - podRequests, _ := resourceapi.PodRequestsAndLimits(podUsingReservation) + podRequests := resourceapi.PodRequests(podUsingReservation, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(reservation.Status.Allocatable)) gomega.Expect(reservation.Status.Allocated).Should(gomega.Equal(podRequests)) gomega.Expect(reservation.Status.CurrentOwners).Should(gomega.Equal([]corev1.ObjectReference{ @@ -281,7 +281,7 @@ var _ = SIGDescribe("DeviceShare", func() { reservationRequests := reservationutil.ReservationRequests(reservation) gomega.Expect(reservation.Status.Allocatable).Should(gomega.Equal(reservationRequests)) - podRequests, _ := resourceapi.PodRequestsAndLimits(podUsingReservation) + podRequests := resourceapi.PodRequests(podUsingReservation, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(reservation.Status.Allocatable)) gomega.Expect(reservation.Status.Allocated).Should(gomega.Equal(podRequests)) gomega.Expect(reservation.Status.CurrentOwners).Should(gomega.Equal([]corev1.ObjectReference{ @@ -404,7 +404,7 @@ var _ = SIGDescribe("DeviceShare", func() { reservationRequests := reservationutil.ReservationRequests(reservation) gomega.Expect(reservation.Status.Allocatable).Should(gomega.Equal(reservationRequests)) - podRequests, _ := resourceapi.PodRequestsAndLimits(podUsingReservation) + podRequests := resourceapi.PodRequests(podUsingReservation, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(reservation.Status.Allocatable)) gomega.Expect(reservation.Status.Allocated).Should(gomega.Equal(podRequests)) gomega.Expect(reservation.Status.CurrentOwners).Should(gomega.Equal([]corev1.ObjectReference{ @@ -545,7 +545,7 @@ var _ = SIGDescribe("DeviceShare", func() { var totalRequests corev1.ResourceList var currentOwners []corev1.ObjectReference for _, pod := range podUsingReservations { - podRequests, _ := resourceapi.PodRequestsAndLimits(pod) + podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(reservation.Status.Allocatable)) totalRequests = quotav1.Add(totalRequests, podRequests) currentOwners = append(currentOwners, corev1.ObjectReference{ @@ -671,7 +671,7 @@ var _ = SIGDescribe("DeviceShare", func() { var totalRequests corev1.ResourceList var currentOwners []corev1.ObjectReference for _, pod := range podUsingReservations { - podRequests, _ := resourceapi.PodRequestsAndLimits(pod) + podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(reservation.Status.Allocatable)) totalRequests = quotav1.Add(totalRequests, podRequests) currentOwners = append(currentOwners, corev1.ObjectReference{ @@ -836,7 +836,7 @@ var _ = SIGDescribe("DeviceShare", func() { var totalRequests 
corev1.ResourceList var currentOwners []corev1.ObjectReference for _, pod := range podUsingReservations { - podRequests, _ := resourceapi.PodRequestsAndLimits(pod) + podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(reservation.Status.Allocatable)) totalRequests = quotav1.Add(totalRequests, podRequests) currentOwners = append(currentOwners, corev1.ObjectReference{ @@ -973,7 +973,7 @@ var _ = SIGDescribe("DeviceShare", func() { var totalRequests corev1.ResourceList var currentOwners []corev1.ObjectReference - podRequests, _ := resourceapi.PodRequestsAndLimits(pod) + podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(reservation.Status.Allocatable)) totalRequests = quotav1.Add(totalRequests, podRequests) currentOwners = append(currentOwners, corev1.ObjectReference{ diff --git a/test/e2e/scheduling/reservation.go b/test/e2e/scheduling/reservation.go index ba9dbdd2f..15caa6fee 100644 --- a/test/e2e/scheduling/reservation.go +++ b/test/e2e/scheduling/reservation.go @@ -109,7 +109,7 @@ var _ = SIGDescribe("Reservation", func() { reservationRequests := reservationutil.ReservationRequests(reservation) gomega.Expect(reservation.Status.Allocatable).Should(gomega.Equal(reservationRequests)) - podRequests, _ := resourceapi.PodRequestsAndLimits(pod) + podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(reservation.Status.Allocatable)) gomega.Expect(reservation.Status.Allocated).Should(gomega.Equal(podRequests)) gomega.Expect(reservation.Status.CurrentOwners).Should(gomega.Equal([]corev1.ObjectReference{ @@ -193,7 +193,7 @@ var _ = SIGDescribe("Reservation", func() { reservationRequests := reservationutil.ReservationRequests(r) gomega.Expect(r.Status.Allocatable).Should(gomega.Equal(reservationRequests)) - podRequests, _ := resourceapi.PodRequestsAndLimits(pod) + podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(r.Status.Allocatable)) for k, v := range podRequests { vv := v.DeepCopy() @@ -609,7 +609,7 @@ var _ = SIGDescribe("Reservation", func() { var totalRequests corev1.ResourceList var currentOwners []corev1.ObjectReference for _, pod := range podUsingReservations { - podRequests, _ := resourceapi.PodRequestsAndLimits(pod) + podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(reservation.Status.Allocatable)) totalRequests = quotav1.Add(totalRequests, podRequests) currentOwners = append(currentOwners, corev1.ObjectReference{ @@ -774,7 +774,7 @@ var _ = SIGDescribe("Reservation", func() { var totalRequests corev1.ResourceList var currentOwners []corev1.ObjectReference for _, pod := range podUsingReservations { - podRequests, _ := resourceapi.PodRequestsAndLimits(pod) + podRequests := resourceapi.PodRequests(pod, resourceapi.PodResourcesOptions{}) podRequests = quotav1.Mask(podRequests, quotav1.ResourceNames(reservation.Status.Allocatable)) totalRequests = quotav1.Add(totalRequests, podRequests) currentOwners = append(currentOwners, corev1.ObjectReference{